GNU Linux-libre 4.14.290-gnu1
drivers/scsi/qla2xxx/qla_mid.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}

static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);

        qlt_update_vp_map(vha, SET_VP_IDX);

        spin_unlock_irqrestore(&ha->vport_slock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}
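
/*
 * Annotation (not in the original source): vp_id allocation scans the
 * ha->vp_idx_map bitmap under ha->vport_lock.  Id 0 identifies the
 * physical port, so vports receive ids 1..max_npiv_vports, and a return
 * value above ha->max_npiv_vports signals failure.  A minimal caller
 * sketch, mirroring qla24xx_create_vhost() below:
 *
 *      vha->vp_idx = qla24xx_allocate_vp_id(vha);
 *      if (vha->vp_idx > ha->max_npiv_vports)
 *              goto create_vhost_failed;
 */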

void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing vport from
         * the list.
         * Lock needs to be held for safe removal from the list (it
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
        wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
            10*HZ);

        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
                ql_dbg(ql_dbg_vport, vha, 0xfffa,
                    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
                vha->vref_count = (atomic_t)ATOMIC_INIT(0);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}
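
/*
 * Annotation: readers that walk ha->vp_list pin a vport by raising
 * vha->vref_count (see qla2x00_alert_all_vps() and
 * qla2x00_do_dpc_all_vps() below).  Deallocation therefore waits on
 * vha->vref_waitq for the count to drain, gives up after 10 seconds,
 * and forces the counter back to zero so the unlink can still proceed.
 */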

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *      Updates fcport state when device goes offline.
 *
 * Input:
 *      vha = virtual adapter block pointer.
 *
 * Return:
 *      None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in a context other than vport create,
         * disable, or delete, make sure it is synchronized with the vport
         * delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}

int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        unsigned long flags;
        int ret;
        fc_port_t *fcport;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        list_for_each_entry(fcport, &vha->vp_fcports, list)
                fcport->logout_on_delete = 0;

        qla2x00_mark_all_devices_lost(vha, 0);

        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->vport_slock, flags);
        qlt_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}
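
/*
 * Annotation: disabling a vport is teardown in miniature.  The firmware
 * logs out all of the vport's sessions (VCE_COMMAND_DISABLE_VPS_LOGO_ALL),
 * the loop is marked down, every fcport is marked lost and UNCONFIGURED,
 * and the vport's AL_PA entry is cleared from the target-mode vp map.
 * Success and failure differ only in the fc_vport state reported back to
 * the FC transport (DISABLED vs. FAILED).
 */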

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
                atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
                !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                ql_dbg(ql_dbg_taskm, vha, 0x800b,
                    "%s skip enable. loop_state %x topo %x\n",
                    __func__, base_vha->loop_state.counter,
                    ha->current_topology);

                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}

static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003,
                    "Failed to enable receiving of RSCN requests: 0x%x.\n",
                    ret);
                return;
        }
        /* Corresponds to SCR enabled */
        clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
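
/*
 * Annotation: "change request #3" is the vport's State Change
 * Registration (SCR); it asks the firmware to deliver RSCNs to this
 * vport.  VP_SCR_NEEDED is set at creation time (see
 * qla24xx_create_vhost() below) precisely so the parent's RSCNs are not
 * processed on the vport's behalf before this registration completes.
 */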

void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                        wake_up(&vha->vref_waitq);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}
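
/*
 * Annotation: the loop above is the canonical pinned traversal of
 * ha->vp_list.  A sketch of the idiom (identifiers as in this file):
 *
 *      spin_lock_irqsave(&ha->vport_slock, flags);
 *      list_for_each_entry(vha, &ha->vp_list, list) {
 *              atomic_inc(&vha->vref_count);
 *              spin_unlock_irqrestore(&ha->vport_slock, flags);
 *
 *              (work that may sleep or take other locks)
 *
 *              spin_lock_irqsave(&ha->vport_slock, flags);
 *              atomic_dec(&vha->vref_count);
 *              wake_up(&vha->vref_waitq);
 *      }
 *      spin_unlock_irqrestore(&ha->vport_slock, flags);
 *
 * The elevated vref_count keeps qla24xx_deallocate_vp_id() from
 * unlinking the vport while the lock is dropped.
 */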

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset vport, we need to log it out first.  Note: this
         * control_vp can fail if ISP reset is already issued, this is
         * expected, as the vp would be already logged out due to ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
        return qla24xx_enable_vp(vha);
}

static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        qla2x00_do_work(vha);

        /* Check if Fw is ready to configure VP first */
        if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
                if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                        /* VP acquired. complete port configuration */
                        ql_dbg(ql_dbg_dpc, vha, 0x4014,
                            "Configure VP scheduled.\n");
                        qla24xx_configure_vp(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4015,
                            "Configure VP end.\n");
                        return 0;
                }
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                if (!vha->relogin_jif ||
                    time_after_eq(jiffies, vha->relogin_jif)) {
                        vha->relogin_jif = jiffies + HZ;
                        clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

                        ql_dbg(ql_dbg_dpc, vha, 0x4018,
                            "Relogin needed scheduled.\n");
                        qla2x00_relogin(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4019,
                            "Relogin needed end.\n");
                }
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}
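
/*
 * Annotation: qla2x00_do_dpc_vp() is the per-vport slice of the DPC
 * kernel thread.  Each pass drains queued work, completes port
 * configuration once the base port reports VP_CONFIG_OK and this vport
 * holds VP_IDX_ACQUIRED, then services the flag-driven chores: fcport
 * updates, relogins (throttled to one attempt per second through
 * relogin_jif), reset markers, and loop resync.
 */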

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check that the WWPN is unique */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max NPIV vport limit */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa004,
                    "num_vhosts %u is bigger "
                    "than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}
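
/*
 * Annotation: this sanity check runs before any resources are committed.
 * It is reached through the FC transport's vport-create path (the
 * transport template wiring lives outside this file; qla_attr.c in the
 * usual qla2xxx layout).  The checks are ordered cheapest first: role,
 * NPIV capability, fabric support, WWPN uniqueness, then the vport
 * count limit.
 */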

scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport before
         * its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}
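
/*
 * Annotation: vport lifecycle in this file.  qla24xx_create_vhost()
 * allocates the Scsi_Host and vp_id, sets VP_SCR_NEEDED, and starts the
 * per-host timer; the DPC thread later finishes bring-up through
 * qla24xx_configure_vp() once the firmware side is ready.  Teardown is
 * the reverse: qla24xx_disable_vp() followed by
 * qla24xx_deallocate_vp_id().
 */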

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
                sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req->outstanding_cmds);
        kfree(req);
        req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp->msix->handle);
                rsp->msix->have_irq = 0;
                rsp->msix->in_use = 0;
                rsp->msix->handle = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
                sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
        rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = QLA_SUCCESS;

        if (req && vha->flags.qpairs_req_created) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
                if (ret != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;

                qla25xx_free_req_que(vha, req);
        }

        return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = QLA_SUCCESS;

        if (rsp && vha->flags.qpairs_rsp_created) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
                if (ret != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;

                qla25xx_free_rsp_que(vha, rsp);
        }

        return ret;
}
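
/*
 * Annotation: setting BIT_0 in the queue options before re-issuing the
 * init-queue mailbox command evidently requests teardown of the queue on
 * the firmware side rather than (re)creation; the host-side ring and
 * bitmap slot are then released via qla25xx_free_{req,rsp}_que().  (The
 * BIT_0 meaning is inferred from this usage, not from a firmware spec.)
 */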

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;
        struct qla_qpair *qpair, *tqpair;

        if (ql2xmqsupport) {
                list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
                    qp_list_elem)
                        qla2xxx_delete_qpair(vha, qpair);
        } else {
                /* Delete request queues */
                for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                        req = ha->req_q_map[cnt];
                        if (req && test_bit(cnt, ha->req_qid_map)) {
                                ret = qla25xx_delete_req_que(vha, req);
                                if (ret != QLA_SUCCESS) {
                                        ql_log(ql_log_warn, vha, 0x00ea,
                                            "Couldn't delete req que %d.\n",
                                            req->id);
                                        return ret;
                                }
                        }
                }

                /* Delete response queues */
                for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                        rsp = ha->rsp_q_map[cnt];
                        if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                                ret = qla25xx_delete_rsp_que(vha, rsp);
                                if (ret != QLA_SUCCESS) {
                                        ql_log(ql_log_warn, vha, 0x00eb,
                                            "Couldn't delete rsp que %d.\n",
                                            rsp->id);
                                        return ret;
                                }
                        }
                }
        }

        return ret;
}
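
/*
 * Annotation: two cleanup paths coexist.  With ql2xmqsupport the queues
 * belong to qpair objects on vha->qp_list and are removed through
 * qla2xxx_delete_qpair(); otherwise the legacy req/rsp maps are walked
 * directly.  Both loops start at index 1 because entry 0 is the base
 * queue pair owned by the physical port.
 */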

int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (req->length + 1) * sizeof(request_t),
                        &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        ret = qla2x00_alloc_outstanding_cmds(ha, req);
        if (ret != QLA_SUCCESS)
                goto que_failed;

        mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->req_q_in = &reg->isp25mq.req_q_in;
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
        mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        if (startqp) {
                ret = qla25xx_init_req_que(base_vha, req);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_fatal, base_vha, 0x00df,
                            "%s failed.\n", __func__);
                        mutex_lock(&ha->mq_lock);
                        clear_bit(que_id, ha->req_qid_map);
                        mutex_unlock(&ha->mq_lock);
                        goto que_failed;
                }
                vha->flags.qpairs_req_created = 1;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}
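
/*
 * Annotation: on success the new queue id is returned (always nonzero,
 * since id 0 is the base queue); 0 means failure, and any partially
 * created resources have already been released via qla25xx_free_req_que().
 * A hypothetical caller sketch:
 *
 *      req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos, true);
 *      if (!req_id)
 *              return QLA_FUNCTION_FAILED;
 */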

static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = qpair->hw;
        struct srb_iocb *nvme, *nxt_nvme;

        spin_lock_irqsave(&qpair->qp_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, qpair->rsp);
        spin_unlock_irqrestore(&qpair->qp_lock, flags);

        list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
                    u.nvme.entry) {
                list_del_init(&nvme->u.nvme.entry);
                qla_nvme_cmpl_io(nvme);
        }
}
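
/*
 * Annotation: this work item offloads response processing from hard-IRQ
 * context; it is armed by the INIT_WORK() at the end of
 * qla25xx_create_rsp_que() whenever the driver workqueue (ha->wq) exists.
 * The response queue is drained under qp_lock, while NVMe completions
 * gathered on nvme_done_list are finished afterwards, outside the
 * spinlock, because the completion callbacks may do further work.
 */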

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (rsp->length + 1) * sizeof(response_t),
                        &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        rsp->msix = qpair->msix;

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode for adapters without MSI-X NACK support */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        /* Set option to indicate response queue creation */
        options |= BIT_1;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
        mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(ha, qpair, qpair->msix,
            QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
        if (ret)
                goto que_failed;

        if (startqp) {
                ret = qla25xx_init_rsp_que(base_vha, rsp);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_fatal, base_vha, 0x00e7,
                            "%s failed.\n", __func__);
                        mutex_lock(&ha->mq_lock);
                        clear_bit(que_id, ha->rsp_qid_map);
                        mutex_unlock(&ha->mq_lock);
                        goto que_failed;
                }
                vha->flags.qpairs_rsp_created = 1;
        }
        rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (qpair->hw->wq)
                INIT_WORK(&qpair->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}