/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					   int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
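/*
 * Both helpers above assume FNIC_IO_LOCKS is a power of two: masking with
 * (FNIC_IO_LOCKS - 1) is then equivalent to "tag % FNIC_IO_LOCKS". For a
 * hypothetical FNIC_IO_LOCKS of 64, tags 5, 69 and 133 would all hash to
 * io_req_lock[5]; two commands contend for the same spinlock only when
 * their tags collide modulo FNIC_IO_LOCKS.
 */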
/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}
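/*
 * Worked example for the wraparound branch above (hypothetical 8-entry
 * ring): with to_clean_index = 6 and fw_ack_index[0] = 1, the ack has
 * wrapped, so desc_avail grows by desc_count - to_clean_index +
 * fw_ack_index + 1 = 8 - 6 + 1 + 1 = 4 (slots 6, 7, 0 and 1), and
 * to_clean_index advances to (1 + 1) % 8 = 2.
 */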
/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
		       unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);

	return;
}
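/*
 * Callers below use fnic_set_state_flags()/fnic_clear_state_flags(),
 * presumably thin wrappers around __fnic_set_state_flags() with clearbits
 * of 0 and 1 respectively; see fnic_fw_reset_handler() for typical usage.
 */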
/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
					&fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}
/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
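/*
 * gw_mac selection above: in map_dest mode no gateway is known, so an
 * all-ones (ff:ff:ff:ff:ff:ff) destination is registered together with
 * FCPIO_FLOGI_REG_DEF_DEST, letting the firmware use its default
 * destination; otherwise the FCoE controller's discovered gateway MAC is
 * registered with FCPIO_FLOGI_REG_GW_DEST.
 */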
/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags = 0;
	struct scsi_lun fc_lun;
	int r;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);

		r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
		if (r) {
			printk(KERN_ERR "PCI mapping failed with error %d\n", r);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);

	r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
	if (r) {
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * sg_count,
				 PCI_DMA_TODEVICE);
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
						/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}
/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	ret = fc_remote_port_chkready(rport);
	if (ret) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	if (rport) {
		struct fc_rport_libfc_priv *rp = rport->dd_data;

		if (!rp || rp->rp_state != RPORT_ST_READY) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"returning DID_NO_CONNECT for IO as rport is removed\n");
			atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
			sc->result = DID_NO_CONNECT<<16;
			done(sc);
			return 0;
		}
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, sc->cmnd[0],
			  sg_count, CMD_STATE(sc));
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/*
	 * Will acquire lock before setting to IO initialized.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, 0, 0,
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
		    atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
				atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
			(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
			sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		  sc->request->tag, sc, io_req,
		  sg_count, cmd_trace,
		  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));

	/* if only we issued IO, will we have the io lock */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}
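/*
 * DEF_SCSI_QCMD below generates the fnic_queuecommand() wrapper that takes
 * shost->host_lock with interrupts disabled and then calls
 * fnic_queuecommand_lck(), matching the locking contract documented above.
 */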
DEF_SCSI_QCMD(fnic_queuecommand)
/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}
/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg: failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}
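/*
 * Example (hypothetical 8-entry ring): with to_clean_index = 2 and
 * to_use_index = 6, only request_out values 2..5 are in range. If the
 * ring has wrapped (to_clean_index = 6, to_use_index = 2), the valid
 * values are 6, 7, 0 and 1, which is exactly what the inverted
 * comparison in the else branch accepts.
 */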
/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		  ox_id_tag[4], ox_id_tag[5]);
}
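/*
 * The cq_index arithmetic above relies on the completion-queue layout
 * used by fnic_wq_copy_cmpl_handler(): raw WQ and RQ completion queues
 * come first, so subtracting raw_wq_count + rq_count recovers the copy WQ
 * index. Only copy WQ 0 is used in this driver, hence the hard-coded
 * wq_copy_lock[0]/fw_ack_index[0] references.
 */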
/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					  struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			"Tag out of range tag %x hdr status = %s\n",
			id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl sc is null - "
			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			  fnic->lport->host->host_no, id,
			  ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			  (u64)icmnd_cmpl->_resvd0[0]),
			  ((u64)hdr_status << 16 |
			  (u64)icmnd_cmpl->scsi_status << 8 |
			  (u64)icmnd_cmpl->flags), desc,
			  (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			CMD_FLAGS(sc) |= FNIC_IO_DONE;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "icmnd_cmpl ABTS pending hdr status = %s "
				  "sc 0x%p scsi_status %x residual %d\n",
				  fnic_fcpio_status_to_str(hdr_status), sc,
				  icmnd_cmpl->scsi_status,
				  icmnd_cmpl->residual);
			break;
		case FCPIO_ABORTED:
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
			break;
		default:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "icmnd_cmpl abts pending "
				  "hdr status = %s tag = 0x%x sc = 0x%p\n",
				  fnic_fcpio_status_to_str(hdr_status),
				  id, sc);
			break;
		}
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}

	fnic_release_ioreq_buf(fnic, io_req, sc);

	mempool_free(io_req, fnic->io_req_pool);

	cmd_trace = ((u64)hdr_status << 56) |
		  (u64)icmnd_cmpl->scsi_status << 48 |
		  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		  sc->device->host->host_no, id, sc,
		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
		  jiffies_to_msecs(jiffies - start_time)),
		  desc, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}
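/*
 * Residual accounting in the FCPIO_SUCCESS case above, by example: for a
 * 4096-byte read that the target completes with residual 512 and
 * RESID_UNDER set, scsi_set_resid() reports 512 back to the midlayer and
 * xfer_len becomes 4096 - 512 = 3584, which is what feeds the
 * fcp_input_bytes counter.
 */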
/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			"Tag out of range tag %x hdr status = %s\n",
			id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"abort reject recd. id %d\n",
				(int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
		else
			CMD_ABTS_STATUS(sc) = hdr_status;

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					sc->device->host->host_no, id,
					sc,
					jiffies_to_msecs(jiffies - start_time),
					desc,
					(((u64)hdr_status << 40) |
					(u64)sc->cmnd[0] << 32 |
					(u64)sc->cmnd[2] << 24 |
					(u64)sc->cmnd[3] << 16 |
					(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					(((u64)CMD_FLAGS(sc) << 32) |
					CMD_STATE(sc)));
				sc->scsi_done(sc);
				atomic64_dec(&fnic_stats->io_stats.active_ios);
				if (atomic64_read(&fnic->io_cmpl_skip))
					atomic64_dec(&fnic->io_cmpl_skip);
				else
					atomic64_inc(&fnic_stats->io_stats.io_completions);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Terminate pending "
				"dev reset cmpl recd. id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}

}
/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}
/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		wq_work_done += cur_work_done;
	}
	return wq_work_done;
}
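/*
 * Completion queues are laid out [raw WQs][RQs][copy WQs] in fnic->cq[],
 * so the copy WQ completion queues start at raw_wq_count + rq_count;
 * vnic_cq_copy_service() drains up to copy_work_to_do entries from each
 * queue, invoking fnic_fcpio_cmpl_handler() once per descriptor.
 */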
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
			      __func__, (jiffies - start_time));

		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		/* Complete the command to SCSI */
		if (sc->scsi_done) {
			FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  0, ((u64)sc->cmnd[0] << 32 |
				  (u64)sc->cmnd[2] << 24 |
				  (u64)sc->cmnd[3] << 16 |
				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}
	}
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			  sc->device->host->host_no, id, sc,
			  jiffies_to_msecs(jiffies - start_time),
			  0, ((u64)sc->cmnd[0] << 32 |
			  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}
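/*
 * The abort is queued under (tag | FNIC_TAG_ABORT) so that
 * fnic_fcpio_itmf_cmpl_handler() can tell an abort completion apart from
 * the original command and from a device reset (FNIC_TAG_DEV_RST); the
 * low FNIC_TAG_MASK bits still recover the SCSI tag.
 */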
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_rport_exch_reset: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}

		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			shost_printk(KERN_ERR, fnic->lport->host,
				  "rport_exch_reset "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst sc 0x%p\n",
			sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_reset_exch: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;
	struct fc_rport *cmd_rport;
	struct reset_stats *reset_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	reset_stats = &fnic->fnic_stats.reset_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_terminate_rport_io: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "fnic_terminate_rport_io "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

}
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	int tag;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	tag = sc->request->tag;
	FNIC_SCSI_DBG(KERN_DEBUG,
		fnic->lport->host,
		"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
		rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));

	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion won't actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion won't be done till mid-layer, since abort
	 * has already started.
	 */
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		if (task_req == FCPIO_ITMF_ABT_TASK) {
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		} else {
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		}
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */

	if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"Issuing Host reset due to out of order IO\n");

		if (fnic_host_reset(sc) == FAILED) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"fnic_host_reset failed.\n");
		}
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	start_time = io_req->start_time;
	/*
	 * firmware completed the abort, check the status,
	 * free the io_req if successful. If abort fails,
	 * Device reset will clean the I/O.
	 */
	if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
		CMD_SP(sc) = NULL;
	else {
		ret = FAILED;
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	if (sc->scsi_done) {
		/* Call SCSI completion function to complete the IO */
		sc->result = (DID_ABORT << 16);
		sc->scsi_done(sc);
		atomic64_dec(&fnic_stats->io_stats.active_ios);
		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);
	}

fnic_abort_cmd_end:
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			  "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}
/*
 * Clean up any pending aborts on the lun.
 * For each outstanding IO on this lun whose abort the firmware has not
 * completed, issue a local abort and wait for it to complete. Return 0 if
 * all commands were successfully aborted, 1 otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc,
				     bool new_sc)
{
	int tag, abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd if issued using new SC
		 * or cmds that do not belong to this lun
		 */
		if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "%s dev rst not pending sc 0x%p\n",
				      __func__, sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if (io_req->abts_done)
			shost_printk(KERN_ERR, fnic->lport->host,
			  "%s: io_req->abts_done is set state is %s\n",
			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
		old_ioreq_state = CMD_STATE(sc);
		/*
		 * Any pending IO issued prior to reset is expected to be
		 * in abts pending state, if not we need to set
		 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
		 * When IO is completed, the IO will be handed over and
		 * handled in this function.
		 */
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

		BUG_ON(io_req->abts_done);

		abt_tag = tag;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag |= FNIC_TAG_DEV_RST;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "%s: dev rst sc 0x%p\n", __func__, sc);
		}

		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			spin_lock_irqsave(io_lock, flags);
			io_req = (struct fnic_io_req *)CMD_SP(sc);
			if (io_req)
				io_req->abts_done = NULL;
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}
		CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
					    (fnic->config.ed_tov));

		/* Recheck cmd state to check if it is now aborted */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
			continue;
		}

		io_req->abts_done = NULL;

		/* if abort is still pending with fw, fail */
		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
			ret = 1;
			goto clean_pending_aborts_end;
		}
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

		/* original sc used for lr is handled by dev reset code */
		if (sc != lr_sc)
			CMD_SP(sc) = NULL;
		spin_unlock_irqrestore(io_lock, flags);

		/* original sc used for lr is handled by dev reset code */
		if (sc != lr_sc) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}

		/*
		 * Any IO returned during reset needs to call scsi_done
		 * to return the scsi_cmnd to the upper layer.
		 */
		if (sc->scsi_done) {
			/* Set result to let upper SCSI layer retry */
			sc->result = DID_RESET << 16;
			sc->scsi_done(sc);
		}
	}

	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

	/* walk again to check, if IOs are still pending in fw */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = 1;

clean_pending_aborts_end:
	return ret;
}
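/*
 * The two tag helpers below exist only for the EH ioctl path: the
 * midlayer fakes up a request without a block-layer tag there (see the
 * XXX(hch) note in fnic_device_reset()), so the driver hands out a free
 * bit from the host's blk_queue_tag map itself and releases it again
 * once the reset completes.
 */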
/**
 * fnic_scsi_host_start_tag
 * Allocates tagid from host's tag list
 **/
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag, ret = SCSI_NO_TAG;
	if (!bqt) {
		pr_err("Tags are not supported\n");
		goto end;
	}
	do {
		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
		if (tag >= bqt->max_depth) {
			pr_err("Tag allocation failure\n");
			goto end;
		}
	} while (test_and_set_bit(tag, bqt->tag_map));
	bqt->tag_index[tag] = sc->request;
	sc->request->tag = tag;
	if (!sc->request->special)
		sc->request->special = sc;
	ret = tag;
end:
	return ret;
}
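/*
 * Note that the bitmap search above starts at bit 1, so this helper
 * never hands out tag 0; SCSI_NO_TAG is returned on any failure.
 */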
/**
 * fnic_scsi_host_end_tag
 * frees tag allocated by fnic_scsi_host_start_tag.
 **/
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag = sc->request->tag;

	if (tag == SCSI_NO_TAG)
		return;

	BUG_ON(!bqt || !bqt->tag_index[tag]);

	bqt->tag_index[tag] = NULL;
	clear_bit(tag, bqt->tag_map);
}
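/*
 * Rough usage sketch of the helper pair, as the device-reset path below
 * uses it (tag_gen_flag records that the tag came from the driver):
 *
 *	tag = sc->request->tag;
 *	if (unlikely(tag < 0)) {
 *		tag = fnic_scsi_host_start_tag(fnic, sc);
 *		if (unlikely(tag == SCSI_NO_TAG))
 *			goto fail;	// no free tag available
 *		tag_gen_flag = 1;
 *	}
 *	...
 *	if (unlikely(tag_gen_flag))
 *		fnic_scsi_host_end_tag(fnic, sc);
 */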
/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
 * on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int tag = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;   /* to track tags allocated by fnic driver */
	bool new_sc = 0;

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic->fnic_stats.reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		goto fnic_device_reset_end;
	}

	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
	/* Allocate tag if not present */

	tag = sc->request->tag;
	if (unlikely(tag < 0)) {
		/*
		 * XXX(hch): currently the midlayer fakes up a struct
		 * request for the explicit reset ioctls, and those
		 * don't have a tag allocated to them. The below
		 * code pokes into midlayer structures to paper over
		 * this design issue, but that won't work for blk-mq.
		 *
		 * Either someone who can actually test the hardware
		 * will have to come up with a similar hack for the
		 * blk-mq case, or we'll have to bite the bullet and
		 * fix the way the EH ioctls work for real, but until
		 * that happens we fail these explicit requests here.
		 */

		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
		new_sc = 1;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is a io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Wait on the local completion for LUN reset. The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = CMD_LR_STATUS(sc);

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on device reset request.
		 * If q'ing of terminate fails, retry it after a delay.
		 */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
				CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				  "Abort and terminate issued on Device reset "
				  "tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				io_req = (struct fnic_io_req *)CMD_SP(sc);
				io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds
	 */
	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* free tag if it is allocated */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}
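/*
 * The remaining entry points implement the top of the EH escalation
 * ladder: when aborts and LUN resets fail, the midlayer falls through
 * to fnic_host_reset(), which resets the libFC local port via
 * fnic_reset() and then waits for the fabric login to complete.
 */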
/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = 0;
	struct reset_stats *reset_stats;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);
	reset_stats = &fnic->fnic_stats.reset_stats;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	atomic64_inc(&reset_stats->fnic_resets);

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	ret = lp->tt.lport_reset(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == 0) ?
		      "SUCCESS" : "FAILED");

	if (ret == 0)
		atomic64_inc(&reset_stats->fnic_reset_completions);
	else
		atomic64_inc(&reset_stats->fnic_reset_failures);

	return ret;
}
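/*
 * fnic_reset() reports 0 on success and nonzero on failure;
 * fnic_host_reset() below maps that onto the SUCCESS/FAILED codes that
 * the SCSI midlayer expects from an eh_host_reset handler.
 */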
/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if link is up, then Fabric login begins.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->internal_reset_inprogress == 0) {
		fnic->internal_reset_inprogress = 1;
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"host reset in progress skipping another host reset\n");
		return SUCCESS;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up
	 */
	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->internal_reset_inprogress = 0;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	return ret;
}
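/*
 * Note: when another host reset is already running, the handler above
 * returns SUCCESS without doing anything, presumably so that the
 * in-progress reset is not duplicated and the midlayer does not
 * escalate further while it completes.
 */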
/*
 * This function is called from libFC when the host is being removed
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}

	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
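/*
 * fnic_scsi_cleanup() below follows the same retry_fw_reset pattern as
 * fnic_scsi_abort_io() above, but runs on link down rather than host
 * removal, so it does not wait on remove_wait for the firmware reset
 * to complete.
 */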
/*
 * This function is called from libFC to clean up driver I/O state on
 * link down
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}
}

/* Intentionally a no-op */
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
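/*
 * fnic_exch_mgr_reset: libFC callout to reset exchanges.
 * Dispatch, in outline: sid != 0 means nothing to clean locally;
 * sid == 0 with did != 0 resets one remote port; sid == 0 and did == 0
 * means link down or device removal, where all driver I/O state is
 * cleaned before libFC's exchange manager is reset.
 */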
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);
}
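/*
 * fnic_clean_pending_aborts() relies on the helper below for its final
 * re-walk of the tag map, after waiting 2 * ed_tov, to decide whether
 * any terminate is still outstanding in firmware.
 */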
/*
 * fnic_is_abts_pending() is a helper function that
 * walks the tag map to check whether any I/Os are still pending; if so,
 * it returns 1 (true), otherwise 0 (false).
 * If @lr_sc is non-NULL, only I/Os on that particular LUN are checked;
 * otherwise, all I/Os are checked.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_device *lun_dev = NULL;

	if (lr_sc)
		lun_dev = lr_sc->device;

	/* walk again to check, if IOs are still pending in fw */
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			ret = 1;
		spin_unlock_irqrestore(io_lock, flags);