/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

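/*
 * Usage sketch (illustrative only, not driver code): copying a WQE image
 * into queue memory. The byte count must be a multiple of sizeof(uint64_t):
 *
 *	union lpfc_wqe128 wqe;	// assumed already built by the caller
 *	lpfc_sli4_pcimem_bcopy(&wqe, temp_wqe, q->entry_size);
 *
 * On 64-bit little-endian builds this is a straight 64-bit word copy; all
 * other configurations fall back to lpfc_sli_pcimem_bcopy(), which swaps
 * each 32-bit word into SLI (big-endian) byte order.
 */
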
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

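/*
 * Caller sketch (hypothetical; the error policy shown is an assumption,
 * not driver code). The routine must be entered with hbalock held:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		;	// ring full: leave the request on the txq for later
 *	else if (rc)
 *		;	// -ENOMEM (no queue) or -EINVAL (bad db_format)
 */
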
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

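/*
 * Illustrative pairing (a sketch; the mailbox queue pointer name
 * phba->sli4_hba.mbx_wq is an assumption here): a posted MQE is correlated
 * to its completion through the q->phba->mbox pointer saved above, and the
 * completion path is expected to call lpfc_sli4_mq_release() to clear it:
 *
 *	if (lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe))
 *		;	// no free entry: caller must retry later
 *	// ...on mailbox completion:
 *	lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
 */
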
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

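/*
 * Typical consumption loop (a condensed sketch of the interrupt-path
 * usage; the real handlers add entry_repost batching and worker handoff):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */
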
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	cqe = q->qe[q->hba_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

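/*
 * Sketch of the poll/release rhythm these routines support (simplified
 * from the driver's CQ processing; 'count' is a local assumed here):
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
 *		if (!(++count % cq->entry_repost))
 *			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
 *	}
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */
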
/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

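/*
 * Illustrative call (a sketch, not driver code): header and data RQEs
 * advance in lock-step, so one buffer pair is posted per call:
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->dbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->dbuf.phys);
 *	drqe.address_lo = putPaddrLow(dbuf->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		;	// -EBUSY when full, -EINVAL on mismatched indices
 */
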
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq active for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

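/*
 * How the RRQ bitmap gates XRI reuse (condensed sketch): once
 * lpfc_set_rrq_active() marks an XRI, callers must avoid reassigning that
 * XRI to the same DID until lpfc_clr_rrq_active() runs or RATOV expires:
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		;	// pick a different sglq/XRI for this target
 */
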
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

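/*
 * Illustrative allocate/release pairing (the return policy is an assumed
 * caller convention, not driver code); the iotag survives across uses:
 *
 *	piocb = lpfc_sli_get_iocbq(phba);
 *	if (!piocb)
 *		return -ENOMEM;
 *	// ...build the iocb and issue it...
 *	lpfc_sli_release_iocbq(phba, piocb);	// error or completion path
 */
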
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * field.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

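/*
 * Dispatch sketch (simplified from the ring event handlers; the real
 * routing happens in functions such as lpfc_sli_process_sol_iocb()):
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
 *	case LPFC_SOL_IOCB:	// look up the cmd by iotag, call iocb_cmpl
 *		break;
 *	case LPFC_UNSOL_IOCB:	// hand the received sequence to the ULP
 *		break;
 *	case LPFC_ABORT_IOCB:	// finish off the aborted exchange
 *		break;
 *	default:		// log and drop
 *		break;
 *	}
 */
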
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

1882 * lpfc_sli_resume_iocb - Process iocbs in the txq
1883 * @phba: Pointer to HBA context object.
1884 * @pring: Pointer to driver SLI ring object.
1886 * This function is called with hbalock held to post pending iocbs
1887 * in the txq to the firmware. This function is called when driver
1888 * detects space available in the ring.
1891 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1894 struct lpfc_iocbq *nextiocb;
1896 lockdep_assert_held(&phba->hbalock);
1900 * (a) there is anything on the txq to send
1901 * (b) link is up
1902 * (c) link attention events can be processed (fcp ring only)
1903 * (d) IOCB processing is not blocked by the outstanding mbox command.
1906 if (lpfc_is_link_up(phba) &&
1907 (!list_empty(&pring->txq)) &&
1908 (pring->ringno != LPFC_FCP_RING ||
1909 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1911 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1912 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1913 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1916 lpfc_sli_update_ring(phba, pring);
1918 lpfc_sli_update_full_ring(phba, pring);
1925 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1926 * @phba: Pointer to HBA context object.
1927 * @hbqno: HBQ number.
1929 * This function is called with hbalock held to get the next
1930 * available slot for the given HBQ. If there is a free slot
1931 * available for the HBQ, it will return a pointer to the next available
1932 * HBQ entry; otherwise it will return NULL.
1934 static struct lpfc_hbq_entry *
1935 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1937 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1939 lockdep_assert_held(&phba->hbalock);
1941 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1942 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1943 hbqp->next_hbqPutIdx = 0;
1945 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1946 uint32_t raw_index = phba->hbq_get[hbqno];
1947 uint32_t getidx = le32_to_cpu(raw_index);
1949 hbqp->local_hbqGetIdx = getidx;
1951 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1952 lpfc_printf_log(phba, KERN_ERR,
1953 LOG_SLI | LOG_VPORT,
1954 "1802 HBQ %d: local_hbqGetIdx "
1955 "%u is > than hbqp->entry_count %u\n",
1956 hbqno, hbqp->local_hbqGetIdx,
1959 phba->link_state = LPFC_HBA_ERROR;
1963 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1967 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1968 hbqp->hbqPutIdx;
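/*
 * Illustrative note: local_hbqGetIdx is a cached copy of the port's get
 * index, so the shared hbq_get word is only re-read (an expensive coherent
 * DMA read) when the cached value makes the ring look full. A minimal
 * sketch of the full-check, with hypothetical locals:
 *
 *	if (cached_get == next_put) {			// looks full
 *		cached_get = le32_to_cpu(*shared_get);	// refresh from port
 *		if (cached_get == next_put)
 *			return NULL;			// genuinely full
 *	}
 */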
1972 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1973 * @phba: Pointer to HBA context object.
1975 * This function is called with no lock held to free all the
1976 * hbq buffers while uninitializing the SLI interface. It also
1977 * frees the HBQ buffers returned by the firmware but not yet
1978 * processed by the upper layers.
1981 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1983 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1984 struct hbq_dmabuf *hbq_buf;
1985 unsigned long flags;
1988 hbq_count = lpfc_sli_hbq_count();
1989 /* Return all memory used by all HBQs */
1990 spin_lock_irqsave(&phba->hbalock, flags);
1991 for (i = 0; i < hbq_count; ++i) {
1992 list_for_each_entry_safe(dmabuf, next_dmabuf,
1993 &phba->hbqs[i].hbq_buffer_list, list) {
1994 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1995 list_del(&hbq_buf->dbuf.list);
1996 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1998 phba->hbqs[i].buffer_count = 0;
2001 /* Mark the HBQs not in use */
2002 phba->hbq_in_use = 0;
2003 spin_unlock_irqrestore(&phba->hbalock, flags);
2007 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2008 * @phba: Pointer to HBA context object.
2009 * @hbqno: HBQ number.
2010 * @hbq_buf: Pointer to HBQ buffer.
2012 * This function is called with the hbalock held to post a
2013 * hbq buffer to the firmware. If the function finds an empty
2014 * slot in the HBQ, it will post the buffer. The function returns zero if
2015 * it successfully posts the buffer, otherwise it returns an error, matching
2016 * the per-revision handler it dispatches to.
2019 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2020 struct hbq_dmabuf *hbq_buf)
2022 lockdep_assert_held(&phba->hbalock);
2023 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2027 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2028 * @phba: Pointer to HBA context object.
2029 * @hbqno: HBQ number.
2030 * @hbq_buf: Pointer to HBQ buffer.
2032 * This function is called with the hbalock held to post a hbq buffer to the
2033 * firmware. If the function finds an empty slot in the HBQ, it will post the
2034 * buffer and place it on the hbq_buffer_list. The function will return zero if
2035 * it successfully posts the buffer; otherwise it will return an error.
2038 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2039 struct hbq_dmabuf *hbq_buf)
2041 struct lpfc_hbq_entry *hbqe;
2042 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2044 lockdep_assert_held(&phba->hbalock);
2045 /* Get next HBQ entry slot to use */
2046 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2048 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2050 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2051 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2052 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2053 hbqe->bde.tus.f.bdeFlags = 0;
2054 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2055 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2057 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2058 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2060 readl(phba->hbq_put + hbqno);
2061 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2068 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2069 * @phba: Pointer to HBA context object.
2070 * @hbqno: HBQ number.
2071 * @hbq_buf: Pointer to HBQ buffer.
2073 * This function is called with the hbalock held to post an RQE to the SLI4
2074 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2075 * the hbq_buffer_list and return zero, otherwise it will return an error.
2078 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2079 struct hbq_dmabuf *hbq_buf)
2082 struct lpfc_rqe hrqe;
2083 struct lpfc_rqe drqe;
2084 struct lpfc_queue *hrq;
2085 struct lpfc_queue *drq;
2087 if (hbqno != LPFC_ELS_HBQ)
2089 hrq = phba->sli4_hba.hdr_rq;
2090 drq = phba->sli4_hba.dat_rq;
2092 lockdep_assert_held(&phba->hbalock);
2093 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2094 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2095 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2096 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2097 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2100 hbq_buf->tag = (rc | (hbqno << 16));
2101 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
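/*
 * Illustrative note: the tag written above packs the owning HBQ number into
 * the upper 16 bits and the buffer index (the RQE index for SLI4) into the
 * lower 16 bits, so lpfc_sli_free_hbq and lpfc_sli_hbqbuf_find can recover
 * either half later. A hedged sketch of the encode/decode:
 *
 *	uint32_t tag = index | (hbqno << 16);	// encode
 *	uint32_t q   = tag >> 16;		// decode HBQ number
 *	uint32_t idx = tag & 0xffff;		// decode buffer index
 */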
2105 /* HBQ for ELS and CT traffic. */
2106 static struct lpfc_hbq_init lpfc_els_hbq = {
2111 .ring_mask = (1 << LPFC_ELS_RING),
2118 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2123 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2124 * @phba: Pointer to HBA context object.
2125 * @hbqno: HBQ number.
2126 * @count: Number of HBQ buffers to be posted.
2128 * This function is called with no lock held to post more hbq buffers to the
2129 * given HBQ. The function returns the number of HBQ buffers successfully
2130 * posted.
2133 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2135 uint32_t i, posted = 0;
2136 unsigned long flags;
2137 struct hbq_dmabuf *hbq_buffer;
2138 LIST_HEAD(hbq_buf_list);
2139 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2142 if ((phba->hbqs[hbqno].buffer_count + count) >
2143 lpfc_hbq_defs[hbqno]->entry_count)
2144 count = lpfc_hbq_defs[hbqno]->entry_count -
2145 phba->hbqs[hbqno].buffer_count;
2148 /* Allocate HBQ entries */
2149 for (i = 0; i < count; i++) {
2150 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2153 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2155 /* Check whether HBQ is still in use */
2156 spin_lock_irqsave(&phba->hbalock, flags);
2157 if (!phba->hbq_in_use)
2159 while (!list_empty(&hbq_buf_list)) {
2160 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2162 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2164 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2165 phba->hbqs[hbqno].buffer_count++;
2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2170 spin_unlock_irqrestore(&phba->hbalock, flags);
2173 spin_unlock_irqrestore(&phba->hbalock, flags);
2174 while (!list_empty(&hbq_buf_list)) {
2175 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2177 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
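/*
 * Illustrative note on the structure of the function above: buffers are
 * first allocated onto a private list with no lock held, and hbalock is
 * only taken afterwards to re-check hbq_in_use and post. A hedged outline:
 *
 *	build private list of buffers;		// no lock, may be slow
 *	spin_lock_irqsave(&lock, flags);
 *	if (hbq still in use)
 *		post each buffer to firmware;	// short critical section
 *	spin_unlock_irqrestore(&lock, flags);
 *	free whatever is left on the list;	// not-in-use/error path
 */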
2183 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2184 * @phba: Pointer to HBA context object.
2185 * @qno: HBQ queue number.
2187 * This function posts more buffers to the HBQ. This function
2188 * is called with no lock held. The function returns the number of HBQ entries
2189 * successfully allocated.
2192 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2194 if (phba->sli_rev == LPFC_SLI_REV4)
2197 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2198 lpfc_hbq_defs[qno]->add_count);
2202 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2203 * @phba: Pointer to HBA context object.
2204 * @qno: HBQ queue number.
2206 * This function is called from SLI initialization code path with
2207 * no lock held to post initial HBQ buffers to firmware. The
2208 * function returns the number of HBQ entries successfully allocated.
2211 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2213 if (phba->sli_rev == LPFC_SLI_REV4)
2214 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2215 lpfc_hbq_defs[qno]->entry_count);
2217 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2218 lpfc_hbq_defs[qno]->init_count);
2222 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2223 * @rb_list: Pointer to the hbq buffer list to remove the first buffer from.
2226 * This function removes the first hbq buffer on an hbq list and returns a
2227 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2229 static struct hbq_dmabuf *
2230 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2232 struct lpfc_dmabuf *d_buf;
2234 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2237 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2241 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2242 * @phba: Pointer to HBA context object.
2243 * @hrq: Pointer to the header receive queue whose buffer list is used.
2245 * This function removes the first RQ buffer on an RQ buffer list and returns a
2246 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2248 static struct rqb_dmabuf *
2249 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2251 struct lpfc_dmabuf *h_buf;
2252 struct lpfc_rqb *rqbp;
2255 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2256 struct lpfc_dmabuf, list);
2259 rqbp->buffer_count--;
2260 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2264 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2265 * @phba: Pointer to HBA context object.
2266 * @tag: Tag of the hbq buffer.
2268 * This function searches for the hbq buffer associated with the given tag in
2269 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2270 * otherwise it returns NULL.
2272 static struct hbq_dmabuf *
2273 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2275 struct lpfc_dmabuf *d_buf;
2276 struct hbq_dmabuf *hbq_buf;
2280 if (hbqno >= LPFC_MAX_HBQS)
2283 spin_lock_irq(&phba->hbalock);
2284 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2285 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2286 if (hbq_buf->tag == tag) {
2287 spin_unlock_irq(&phba->hbalock);
2291 spin_unlock_irq(&phba->hbalock);
2292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2293 "1803 Bad hbq tag. Data: x%x x%x\n",
2294 tag, phba->hbqs[tag >> 16].buffer_count);
2299 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2300 * @phba: Pointer to HBA context object.
2301 * @hbq_buffer: Pointer to HBQ buffer.
2303 * This function is called with the hbalock held. It gives back
2304 * the hbq buffer to firmware. If the HBQ does not have space to
2305 * post the buffer, it will free the buffer.
2308 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2313 hbqno = hbq_buffer->tag >> 16;
2314 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2315 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2320 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2321 * @mbxCommand: mailbox command code.
2323 * This function is called by the mailbox event handler function to verify
2324 * that the completed mailbox command is a legitimate mailbox command. If the
2325 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2326 * and the mailbox event handler will take the HBA offline.
2329 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2333 switch (mbxCommand) {
2337 case MBX_WRITE_VPARMS:
2338 case MBX_RUN_BIU_DIAG:
2341 case MBX_CONFIG_LINK:
2342 case MBX_CONFIG_RING:
2343 case MBX_RESET_RING:
2344 case MBX_READ_CONFIG:
2345 case MBX_READ_RCONFIG:
2346 case MBX_READ_SPARM:
2347 case MBX_READ_STATUS:
2351 case MBX_READ_LNK_STAT:
2353 case MBX_UNREG_LOGIN:
2355 case MBX_DUMP_MEMORY:
2356 case MBX_DUMP_CONTEXT:
2359 case MBX_UPDATE_CFG:
2361 case MBX_DEL_LD_ENTRY:
2362 case MBX_RUN_PROGRAM:
2364 case MBX_SET_VARIABLE:
2365 case MBX_UNREG_D_ID:
2366 case MBX_KILL_BOARD:
2367 case MBX_CONFIG_FARP:
2370 case MBX_RUN_BIU_DIAG64:
2371 case MBX_CONFIG_PORT:
2372 case MBX_READ_SPARM64:
2373 case MBX_READ_RPI64:
2374 case MBX_REG_LOGIN64:
2375 case MBX_READ_TOPOLOGY:
2378 case MBX_LOAD_EXP_ROM:
2379 case MBX_ASYNCEVT_ENABLE:
2383 case MBX_PORT_CAPABILITIES:
2384 case MBX_PORT_IOV_CONTROL:
2385 case MBX_SLI4_CONFIG:
2386 case MBX_SLI4_REQ_FTRS:
2388 case MBX_UNREG_FCFI:
2393 case MBX_RESUME_RPI:
2394 case MBX_READ_EVENT_LOG_STATUS:
2395 case MBX_READ_EVENT_LOG:
2396 case MBX_SECURITY_MGMT:
2398 case MBX_ACCESS_VDATA:
2409 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2410 * @phba: Pointer to HBA context object.
2411 * @pmboxq: Pointer to mailbox command.
2413 * This is the completion handler function for mailbox commands issued from
2414 * the lpfc_sli_issue_mbox_wait function. This function is called by the
2415 * mailbox event handler function with no lock held. This function
2416 * will wake up the thread waiting on the completion pointed to by context3
2417 * of the mailbox.
2420 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2422 unsigned long drvr_flag;
2423 struct completion *pmbox_done;
2426 * If pmbox_done is empty, the driver thread gave up waiting and
2427 * continued running.
2429 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2430 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2431 pmbox_done = (struct completion *)pmboxq->context3;
2433 complete(pmbox_done);
2434 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
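/*
 * Illustrative note: this wake-up pairs with a waiter that parks a struct
 * completion in context3 before issuing the mailbox. A hedged sketch of the
 * waiter side (timeout value hypothetical):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *	pmboxq->context3 = &mbox_done;
 *	// issue the mailbox with MBX_NOWAIT, then:
 *	wait_for_completion_timeout(&mbox_done, msecs_to_jiffies(5000));
 *	pmboxq->context3 = NULL;	// cleared under hbalock in practice
 */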
2440 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2441 * @phba: Pointer to HBA context object.
2442 * @pmb: Pointer to mailbox object.
2444 * This function is the default mailbox completion handler. It
2445 * frees the memory resources associated with the completed mailbox
2446 * command. If the completed command is a REG_LOGIN mailbox command,
2447 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2450 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2452 struct lpfc_vport *vport = pmb->vport;
2453 struct lpfc_dmabuf *mp;
2454 struct lpfc_nodelist *ndlp;
2455 struct Scsi_Host *shost;
2459 mp = (struct lpfc_dmabuf *) (pmb->context1);
2462 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2467 * If a REG_LOGIN succeeded after the node was destroyed or the node
2468 * is in re-discovery, the driver needs to clean up the RPI.
2470 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2471 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2472 !pmb->u.mb.mbxStatus) {
2473 rpi = pmb->u.mb.un.varWords[0];
2474 vpi = pmb->u.mb.un.varRegLogin.vpi;
2475 if (phba->sli_rev == LPFC_SLI_REV4)
2476 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2477 lpfc_unreg_login(phba, vpi, rpi, pmb);
2479 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2480 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2481 if (rc != MBX_NOT_FINISHED)
2485 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2486 !(phba->pport->load_flag & FC_UNLOADING) &&
2487 !pmb->u.mb.mbxStatus) {
2488 shost = lpfc_shost_from_vport(vport);
2489 spin_lock_irq(shost->host_lock);
2490 vport->vpi_state |= LPFC_VPI_REGISTERED;
2491 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2492 spin_unlock_irq(shost->host_lock);
2495 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2496 ndlp = (struct lpfc_nodelist *)pmb->context2;
2498 pmb->context2 = NULL;
2501 /* Check security permission status on INIT_LINK mailbox command */
2502 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2503 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2504 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2505 "2860 SLI authentication is required "
2506 "for INIT_LINK but has not done yet\n");
2508 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2509 lpfc_sli4_mbox_cmd_free(phba, pmb);
2511 mempool_free(pmb, phba->mbox_mem_pool);
2514 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2515 * @phba: Pointer to HBA context object.
2516 * @pmb: Pointer to mailbox object.
2518 * This function is the unreg rpi mailbox completion handler. It
2519 * frees the memory resources associated with the completed mailbox
2520 * command. An additional reference is put on the ndlp to prevent
2521 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2522 * the unreg mailbox command completes; this routine puts that
2523 * reference back.
2527 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2529 struct lpfc_vport *vport = pmb->vport;
2530 struct lpfc_nodelist *ndlp;
2532 ndlp = pmb->context1;
2533 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2534 if (phba->sli_rev == LPFC_SLI_REV4 &&
2535 (bf_get(lpfc_sli_intf_if_type,
2536 &phba->sli4_hba.sli_intf) >=
2537 LPFC_SLI_INTF_IF_TYPE_2)) {
2539 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2540 "0010 UNREG_LOGIN vpi:%x "
2541 "rpi:%x DID:%x map:%x %p\n",
2542 vport->vpi, ndlp->nlp_rpi,
2544 ndlp->nlp_usg_map, ndlp);
2545 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2551 mempool_free(pmb, phba->mbox_mem_pool);
2555 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2556 * @phba: Pointer to HBA context object.
2558 * This function is called with no lock held. This function processes all
2559 * the completed mailbox commands and gives them to the upper layers. The
2560 * interrupt service routine processes the mailbox completion interrupt and
2561 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2562 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which will return the
2563 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2564 * function returns the mailbox commands to the upper layer by calling the
2565 * completion handler function of each mailbox.
2568 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2575 phba->sli.slistat.mbox_event++;
2577 /* Get all completed mailbox buffers into the cmplq */
2578 spin_lock_irq(&phba->hbalock);
2579 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2580 spin_unlock_irq(&phba->hbalock);
2582 /* Get a Mailbox buffer to setup mailbox commands for callback */
2584 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2590 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2592 lpfc_debugfs_disc_trc(pmb->vport,
2593 LPFC_DISC_TRC_MBOX_VPORT,
2594 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2595 (uint32_t)pmbox->mbxCommand,
2596 pmbox->un.varWords[0],
2597 pmbox->un.varWords[1]);
2600 lpfc_debugfs_disc_trc(phba->pport,
2602 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2603 (uint32_t)pmbox->mbxCommand,
2604 pmbox->un.varWords[0],
2605 pmbox->un.varWords[1]);
2610 * It is a fatal error if an unknown mbox command completes.
2612 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2614 /* Unknown mailbox command compl */
2615 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2616 "(%d):0323 Unknown Mailbox command "
2617 "x%x (x%x/x%x) Cmpl\n",
2618 pmb->vport ? pmb->vport->vpi : 0,
2620 lpfc_sli_config_mbox_subsys_get(phba,
2622 lpfc_sli_config_mbox_opcode_get(phba,
2624 phba->link_state = LPFC_HBA_ERROR;
2625 phba->work_hs = HS_FFER3;
2626 lpfc_handle_eratt(phba);
2630 if (pmbox->mbxStatus) {
2631 phba->sli.slistat.mbox_stat_err++;
2632 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2633 /* Mbox cmd cmpl error - RETRYing */
2634 lpfc_printf_log(phba, KERN_INFO,
2636 "(%d):0305 Mbox cmd cmpl "
2637 "error - RETRYing Data: x%x "
2638 "(x%x/x%x) x%x x%x x%x\n",
2639 pmb->vport ? pmb->vport->vpi : 0,
2641 lpfc_sli_config_mbox_subsys_get(phba,
2643 lpfc_sli_config_mbox_opcode_get(phba,
2646 pmbox->un.varWords[0],
2647 pmb->vport->port_state);
2648 pmbox->mbxStatus = 0;
2649 pmbox->mbxOwner = OWN_HOST;
2650 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2651 if (rc != MBX_NOT_FINISHED)
2656 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2657 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2658 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2659 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2661 pmb->vport ? pmb->vport->vpi : 0,
2663 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2664 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2666 *((uint32_t *) pmbox),
2667 pmbox->un.varWords[0],
2668 pmbox->un.varWords[1],
2669 pmbox->un.varWords[2],
2670 pmbox->un.varWords[3],
2671 pmbox->un.varWords[4],
2672 pmbox->un.varWords[5],
2673 pmbox->un.varWords[6],
2674 pmbox->un.varWords[7],
2675 pmbox->un.varWords[8],
2676 pmbox->un.varWords[9],
2677 pmbox->un.varWords[10]);
2680 pmb->mbox_cmpl(phba, pmb);
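/*
 * Illustrative note on the loop above: completed mailboxes are spliced from
 * sli.mboxq_cmpl onto a local list under hbalock and then drained with the
 * lock dropped, so a completion handler may itself issue a new mailbox.
 * A hedged outline of the splice-then-drain pattern:
 *
 *	LIST_HEAD(local);
 *	spin_lock_irq(&lock);
 *	list_splice_init(&shared_cmpl_list, &local);
 *	spin_unlock_irq(&lock);
 *	while (!list_empty(&local))
 *		handle(pop_head(&local));	// lock not held here
 */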
2686 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2687 * @phba: Pointer to HBA context object.
2688 * @pring: Pointer to driver SLI ring object.
2689 * @tag: Buffer tag.
2691 * This function is called with no lock held. When the QUE_BUFTAG_BIT is
2692 * set in the tag, the buffer was posted for a particular exchange and
2693 * the function will return the buffer without replacing it.
2694 * If the buffer is for unsolicited ELS or CT traffic, this function
2695 * returns the buffer and also posts another buffer to the firmware.
2697 static struct lpfc_dmabuf *
2698 lpfc_sli_get_buff(struct lpfc_hba *phba,
2699 struct lpfc_sli_ring *pring,
2702 struct hbq_dmabuf *hbq_entry;
2704 if (tag & QUE_BUFTAG_BIT)
2705 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2706 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2709 return &hbq_entry->dbuf;
2713 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2714 * @phba: Pointer to HBA context object.
2715 * @pring: Pointer to driver SLI ring object.
2716 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2717 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2718 * @fch_type: the type for the first frame of the sequence.
2720 * This function is called with no lock held. This function uses the r_ctl and
2721 * type of the received sequence to find the correct callback function to call
2722 * to process the sequence.
2725 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2726 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2733 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2739 /* Unsolicited Responses */
2740 if (pring->prt[0].profile) {
2741 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2742 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2746 /* We must search, based on rctl / type
2747 for the right routine */
2748 for (i = 0; i < pring->num_mask; i++) {
2749 if ((pring->prt[i].rctl == fch_r_ctl) &&
2750 (pring->prt[i].type == fch_type)) {
2751 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2752 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2753 (phba, pring, saveq);
2761 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2762 * @phba: Pointer to HBA context object.
2763 * @pring: Pointer to driver SLI ring object.
2764 * @saveq: Pointer to the unsolicited iocb.
2766 * This function is called with no lock held by the ring event handler
2767 * when there is an unsolicited iocb posted to the response ring by the
2768 * firmware. This function gets the buffer associated with the iocbs
2769 * and calls the event handler for the ring. This function handles both
2770 * qring buffers and hbq buffers.
2771 * When the function returns 1, the caller can free the iocb object;
2772 * otherwise upper layer functions will free the iocb objects.
2775 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2776 struct lpfc_iocbq *saveq)
2780 uint32_t Rctl, Type;
2781 struct lpfc_iocbq *iocbq;
2782 struct lpfc_dmabuf *dmzbuf;
2784 irsp = &(saveq->iocb);
2786 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2787 if (pring->lpfc_sli_rcv_async_status)
2788 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2790 lpfc_printf_log(phba,
2793 "0316 Ring %d handler: unexpected "
2794 "ASYNC_STATUS iocb received evt_code "
2797 irsp->un.asyncstat.evt_code);
2801 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2802 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2803 if (irsp->ulpBdeCount > 0) {
2804 dmzbuf = lpfc_sli_get_buff(phba, pring,
2805 irsp->un.ulpWord[3]);
2806 lpfc_in_buf_free(phba, dmzbuf);
2809 if (irsp->ulpBdeCount > 1) {
2810 dmzbuf = lpfc_sli_get_buff(phba, pring,
2811 irsp->unsli3.sli3Words[3]);
2812 lpfc_in_buf_free(phba, dmzbuf);
2815 if (irsp->ulpBdeCount > 2) {
2816 dmzbuf = lpfc_sli_get_buff(phba, pring,
2817 irsp->unsli3.sli3Words[7]);
2818 lpfc_in_buf_free(phba, dmzbuf);
2824 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2825 if (irsp->ulpBdeCount != 0) {
2826 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2827 irsp->un.ulpWord[3]);
2828 if (!saveq->context2)
2829 lpfc_printf_log(phba,
2832 "0341 Ring %d Cannot find buffer for "
2833 "an unsolicited iocb. tag 0x%x\n",
2835 irsp->un.ulpWord[3]);
2837 if (irsp->ulpBdeCount == 2) {
2838 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2839 irsp->unsli3.sli3Words[7]);
2840 if (!saveq->context3)
2841 lpfc_printf_log(phba,
2844 "0342 Ring %d Cannot find buffer for an"
2845 " unsolicited iocb. tag 0x%x\n",
2847 irsp->unsli3.sli3Words[7]);
2849 list_for_each_entry(iocbq, &saveq->list, list) {
2850 irsp = &(iocbq->iocb);
2851 if (irsp->ulpBdeCount != 0) {
2852 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2853 irsp->un.ulpWord[3]);
2854 if (!iocbq->context2)
2855 lpfc_printf_log(phba,
2858 "0343 Ring %d Cannot find "
2859 "buffer for an unsolicited iocb"
2860 ". tag 0x%x\n", pring->ringno,
2861 irsp->un.ulpWord[3]);
2863 if (irsp->ulpBdeCount == 2) {
2864 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2865 irsp->unsli3.sli3Words[7]);
2866 if (!iocbq->context3)
2867 lpfc_printf_log(phba,
2870 "0344 Ring %d Cannot find "
2871 "buffer for an unsolicited "
2874 irsp->unsli3.sli3Words[7]);
2878 if (irsp->ulpBdeCount != 0 &&
2879 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2880 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2883 /* search continue save q for same XRI */
2884 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2885 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2886 saveq->iocb.unsli3.rcvsli3.ox_id) {
2887 list_add_tail(&saveq->list, &iocbq->list);
2893 list_add_tail(&saveq->clist,
2894 &pring->iocb_continue_saveq);
2895 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2896 list_del_init(&iocbq->clist);
2898 irsp = &(saveq->iocb);
2902 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2903 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2904 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2905 Rctl = FC_RCTL_ELS_REQ;
2908 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2909 Rctl = w5p->hcsw.Rctl;
2910 Type = w5p->hcsw.Type;
2912 /* Firmware Workaround */
2913 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2914 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2915 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2916 Rctl = FC_RCTL_ELS_REQ;
2918 w5p->hcsw.Rctl = Rctl;
2919 w5p->hcsw.Type = Type;
2923 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2924 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2925 "0313 Ring %d handler: unexpected Rctl x%x "
2926 "Type x%x received\n",
2927 pring->ringno, Rctl, Type);
2933 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2934 * @phba: Pointer to HBA context object.
2935 * @pring: Pointer to driver SLI ring object.
2936 * @prspiocb: Pointer to response iocb object.
2938 * This function looks up the iocb_lookup table to get the command iocb
2939 * corresponding to the given response iocb using the iotag of the
2940 * response iocb. This function is called with the hbalock held
2941 * for sli3 devices or the ring_lock for sli4 devices.
2942 * This function returns the command iocb object if it finds the command
2943 * iocb else returns NULL.
2945 static struct lpfc_iocbq *
2946 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2947 struct lpfc_sli_ring *pring,
2948 struct lpfc_iocbq *prspiocb)
2950 struct lpfc_iocbq *cmd_iocb = NULL;
2952 lockdep_assert_held(&phba->hbalock);
2954 iotag = prspiocb->iocb.ulpIoTag;
2956 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2957 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2958 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2959 /* remove from txcmpl queue list */
2960 list_del_init(&cmd_iocb->list);
2961 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2967 "0317 iotag x%x is out of "
2968 "range: max iotag x%x wd0 x%x\n",
2969 iotag, phba->sli.last_iotag,
2970 *(((uint32_t *) &prspiocb->iocb) + 7));
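/*
 * Illustrative note: iocbq_lookup is a flat array indexed by iotag, so the
 * command iocb for a response is found in O(1) instead of walking txcmplq.
 * A hedged sketch of the lookup contract:
 *
 *	if (iotag != 0 && iotag <= last_iotag)
 *		cmd = lookup_table[iotag];	// direct index, no search
 *	else
 *		cmd = NULL;			// out-of-range tag is an error
 */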
2975 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2976 * @phba: Pointer to HBA context object.
2977 * @pring: Pointer to driver SLI ring object.
2980 * This function looks up the iocb_lookup table to get the command iocb
2981 * corresponding to the given iotag. This function is called with the
2982 * hbalock held.
2983 * This function returns the command iocb object if it finds the command
2984 * iocb else returns NULL.
2986 static struct lpfc_iocbq *
2987 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2988 struct lpfc_sli_ring *pring, uint16_t iotag)
2990 struct lpfc_iocbq *cmd_iocb = NULL;
2992 lockdep_assert_held(&phba->hbalock);
2993 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2994 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2995 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2996 /* remove from txcmpl queue list */
2997 list_del_init(&cmd_iocb->list);
2998 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3003 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3004 "0372 iotag x%x lookup error: max iotag (x%x) "
3006 iotag, phba->sli.last_iotag,
3007 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3012 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3013 * @phba: Pointer to HBA context object.
3014 * @pring: Pointer to driver SLI ring object.
3015 * @saveq: Pointer to the response iocb to be processed.
3017 * This function is called by the ring event handler for non-fcp
3018 * rings when there is a new response iocb in the response ring.
3019 * The caller is not required to hold any locks. This function
3020 * gets the command iocb associated with the response iocb and
3021 * calls the completion handler for the command iocb. If there
3022 * is no completion handler, the function will free the resources
3023 * associated with command iocb. If the response iocb is for
3024 * an already aborted command iocb, the status of the completion
3025 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3026 * This function always returns 1.
3029 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3030 struct lpfc_iocbq *saveq)
3032 struct lpfc_iocbq *cmdiocbp;
3034 unsigned long iflag;
3036 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
3037 if (phba->sli_rev == LPFC_SLI_REV4)
3038 spin_lock_irqsave(&pring->ring_lock, iflag);
3040 spin_lock_irqsave(&phba->hbalock, iflag);
3041 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3042 if (phba->sli_rev == LPFC_SLI_REV4)
3043 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3045 spin_unlock_irqrestore(&phba->hbalock, iflag);
3048 if (cmdiocbp->iocb_cmpl) {
3050 * If an ELS command failed send an event to mgmt
3053 if (saveq->iocb.ulpStatus &&
3054 (pring->ringno == LPFC_ELS_RING) &&
3055 (cmdiocbp->iocb.ulpCommand ==
3056 CMD_ELS_REQUEST64_CR))
3057 lpfc_send_els_failure_event(phba,
3061 * Post all ELS completions to the worker thread.
3062 * All others are passed to the completion callback.
3064 if (pring->ringno == LPFC_ELS_RING) {
3065 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3066 (cmdiocbp->iocb_flag &
3067 LPFC_DRIVER_ABORTED)) {
3068 spin_lock_irqsave(&phba->hbalock,
3070 cmdiocbp->iocb_flag &=
3071 ~LPFC_DRIVER_ABORTED;
3072 spin_unlock_irqrestore(&phba->hbalock,
3074 saveq->iocb.ulpStatus =
3075 IOSTAT_LOCAL_REJECT;
3076 saveq->iocb.un.ulpWord[4] =
3079 /* Firmware could still be in progress
3080 * of DMAing payload, so don't free data
3081 * buffer till after a hbeat.
3083 spin_lock_irqsave(&phba->hbalock,
3085 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3086 spin_unlock_irqrestore(&phba->hbalock,
3089 if (phba->sli_rev == LPFC_SLI_REV4) {
3090 if (saveq->iocb_flag &
3091 LPFC_EXCHANGE_BUSY) {
3092 /* Set cmdiocb flag for the
3093 * exchange busy so sgl (xri)
3094 * will not be released until
3095 * the abort xri is received
3099 &phba->hbalock, iflag);
3100 cmdiocbp->iocb_flag |=
3102 spin_unlock_irqrestore(
3103 &phba->hbalock, iflag);
3105 if (cmdiocbp->iocb_flag &
3106 LPFC_DRIVER_ABORTED) {
3108 * Clear LPFC_DRIVER_ABORTED
3109 * bit in case it was driver
3113 &phba->hbalock, iflag);
3114 cmdiocbp->iocb_flag &=
3115 ~LPFC_DRIVER_ABORTED;
3116 spin_unlock_irqrestore(
3117 &phba->hbalock, iflag);
3118 cmdiocbp->iocb.ulpStatus =
3119 IOSTAT_LOCAL_REJECT;
3120 cmdiocbp->iocb.un.ulpWord[4] =
3121 IOERR_ABORT_REQUESTED;
3123 * For SLI4, irsiocb contains
3124 * NO_XRI in sli_xritag, it
3125 * shall not affect releasing
3126 * sgl (xri) process.
3128 saveq->iocb.ulpStatus =
3129 IOSTAT_LOCAL_REJECT;
3130 saveq->iocb.un.ulpWord[4] =
3133 &phba->hbalock, iflag);
3135 LPFC_DELAY_MEM_FREE;
3136 spin_unlock_irqrestore(
3137 &phba->hbalock, iflag);
3141 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3143 lpfc_sli_release_iocbq(phba, cmdiocbp);
3146 * Unknown initiating command based on the response iotag.
3147 * This could be the case on the ELS ring because of lpfc_els_abort().
3150 if (pring->ringno != LPFC_ELS_RING) {
3152 * Ring <ringno> handler: unexpected completion IoTag
3155 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3156 "0322 Ring %d handler: "
3157 "unexpected completion IoTag x%x "
3158 "Data: x%x x%x x%x x%x\n",
3160 saveq->iocb.ulpIoTag,
3161 saveq->iocb.ulpStatus,
3162 saveq->iocb.un.ulpWord[4],
3163 saveq->iocb.ulpCommand,
3164 saveq->iocb.ulpContext);
3172 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3173 * @phba: Pointer to HBA context object.
3174 * @pring: Pointer to driver SLI ring object.
3176 * This function is called from the iocb ring event handlers when
3177 * the put pointer is ahead of the get pointer for a ring. This function
3178 * signals an error attention condition to the worker thread, and the worker
3179 * thread will transition the HBA to offline state.
3182 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3184 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3186 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3187 * rsp ring <portRspMax>
3189 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3190 "0312 Ring %d handler: portRspPut %d "
3191 "is bigger than rsp ring %d\n",
3192 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3193 pring->sli.sli3.numRiocb);
3195 phba->link_state = LPFC_HBA_ERROR;
3198 * All error attention handlers are posted to the worker thread.
3201 phba->work_ha |= HA_ERATT;
3202 phba->work_hs = HS_FFER3;
3204 lpfc_worker_wake_up(phba);
3210 * lpfc_poll_eratt - Error attention polling timer timeout handler
3211 * @t: Pointer to the eratt_poll timer_list embedded in the HBA context object.
3213 * This function is invoked by the Error Attention polling timer when the
3214 * timer times out. It will check the SLI Error Attention register for
3215 * possible attention events. If so, it will post an Error Attention event
3216 * and wake up worker thread to process it. Otherwise, it will set up the
3217 * Error Attention polling timer for the next poll.
3219 void lpfc_poll_eratt(struct timer_list *t)
3221 struct lpfc_hba *phba;
3223 uint64_t sli_intr, cnt;
3225 phba = from_timer(phba, t, eratt_poll);
3227 /* Here we will also keep track of interrupts per sec of the hba */
3228 sli_intr = phba->sli.slistat.sli_intr;
3230 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3231 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3234 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3236 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3237 do_div(cnt, phba->eratt_poll_interval);
3238 phba->sli.slistat.sli_ips = cnt;
3240 phba->sli.slistat.sli_prev_intr = sli_intr;
3242 /* Check chip HA register for error event */
3243 eratt = lpfc_sli_check_eratt(phba);
3246 /* Tell the worker thread there is work to do */
3247 lpfc_worker_wake_up(phba);
3249 /* Restart the timer for next eratt poll */
3250 mod_timer(&phba->eratt_poll,
3252 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
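/*
 * Illustrative note: do_div() divides a u64 in place by a 32-bit divisor
 * and returns the remainder, which is why the rate computation above works
 * on 32-bit builds that lack a native 64-bit "/" for u64 operands. A hedged
 * usage sketch:
 *
 *	uint64_t n = delta_interrupts;		// dividend, modified in place
 *	uint32_t rem = do_div(n, interval_secs);
 *	// n now holds interrupts per second, rem the remainder
 */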
3258 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3259 * @phba: Pointer to HBA context object.
3260 * @pring: Pointer to driver SLI ring object.
3261 * @mask: Host attention register mask for this ring.
3263 * This function is called from the interrupt context when there is a ring
3264 * event for the fcp ring. The caller does not hold any lock.
3265 * The function processes each response iocb in the response ring until it
3266 * finds an iocb with the LE bit set, chaining together all the iocbs up to
3267 * and including it. The function will call the completion handler of the command iocb
3268 * if the response iocb indicates a completion for a command iocb or it is
3269 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3270 * function if this is an unsolicited iocb.
3271 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3272 * to check it explicitly.
3275 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3276 struct lpfc_sli_ring *pring, uint32_t mask)
3278 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3279 IOCB_t *irsp = NULL;
3280 IOCB_t *entry = NULL;
3281 struct lpfc_iocbq *cmdiocbq = NULL;
3282 struct lpfc_iocbq rspiocbq;
3284 uint32_t portRspPut, portRspMax;
3286 lpfc_iocb_type type;
3287 unsigned long iflag;
3288 uint32_t rsp_cmpl = 0;
3290 spin_lock_irqsave(&phba->hbalock, iflag);
3291 pring->stats.iocb_event++;
3294 * The next available response entry should never exceed the maximum
3295 * entries. If it does, treat it as an adapter hardware error.
3297 portRspMax = pring->sli.sli3.numRiocb;
3298 portRspPut = le32_to_cpu(pgp->rspPutInx);
3299 if (unlikely(portRspPut >= portRspMax)) {
3300 lpfc_sli_rsp_pointers_error(phba, pring);
3301 spin_unlock_irqrestore(&phba->hbalock, iflag);
3304 if (phba->fcp_ring_in_use) {
3305 spin_unlock_irqrestore(&phba->hbalock, iflag);
3308 phba->fcp_ring_in_use = 1;
3311 while (pring->sli.sli3.rspidx != portRspPut) {
3313 * Fetch an entry off the ring and copy it into a local data
3314 * structure. The copy involves a byte-swap since the
3315 * network byte order and pci byte orders are different.
3317 entry = lpfc_resp_iocb(phba, pring);
3318 phba->last_completion_time = jiffies;
3320 if (++pring->sli.sli3.rspidx >= portRspMax)
3321 pring->sli.sli3.rspidx = 0;
3323 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3324 (uint32_t *) &rspiocbq.iocb,
3325 phba->iocb_rsp_size);
3326 INIT_LIST_HEAD(&(rspiocbq.list));
3327 irsp = &rspiocbq.iocb;
3329 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3330 pring->stats.iocb_rsp++;
3333 if (unlikely(irsp->ulpStatus)) {
3335 * If resource errors reported from HBA, reduce
3336 * queuedepths of the SCSI device.
3338 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3339 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3340 IOERR_NO_RESOURCES)) {
3341 spin_unlock_irqrestore(&phba->hbalock, iflag);
3342 phba->lpfc_rampdown_queue_depth(phba);
3343 spin_lock_irqsave(&phba->hbalock, iflag);
3346 /* Rsp ring <ringno> error: IOCB */
3347 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3348 "0336 Rsp Ring %d error: IOCB Data: "
3349 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3351 irsp->un.ulpWord[0],
3352 irsp->un.ulpWord[1],
3353 irsp->un.ulpWord[2],
3354 irsp->un.ulpWord[3],
3355 irsp->un.ulpWord[4],
3356 irsp->un.ulpWord[5],
3357 *(uint32_t *)&irsp->un1,
3358 *((uint32_t *)&irsp->un1 + 1));
3362 case LPFC_ABORT_IOCB:
3365 * Idle exchange closed via ABTS from port. No iocb
3366 * resources need to be recovered.
3368 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3369 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3370 "0333 IOCB cmd 0x%x"
3371 " processed. Skipping"
3377 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3379 if (unlikely(!cmdiocbq))
3381 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3382 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3383 if (cmdiocbq->iocb_cmpl) {
3384 spin_unlock_irqrestore(&phba->hbalock, iflag);
3385 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3387 spin_lock_irqsave(&phba->hbalock, iflag);
3390 case LPFC_UNSOL_IOCB:
3391 spin_unlock_irqrestore(&phba->hbalock, iflag);
3392 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3393 spin_lock_irqsave(&phba->hbalock, iflag);
3396 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3397 char adaptermsg[LPFC_MAX_ADPTMSG];
3398 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3399 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3401 dev_warn(&((phba->pcidev)->dev),
3403 phba->brd_no, adaptermsg);
3405 /* Unknown IOCB command */
3406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3407 "0334 Unknown IOCB command "
3408 "Data: x%x, x%x x%x x%x x%x\n",
3409 type, irsp->ulpCommand,
3418 * The response IOCB has been processed. Update the ring
3419 * pointer in SLIM. If the port response put pointer has not
3420 * been updated, sync the pgp->rspPutInx and fetch the new port
3421 * response put pointer.
3423 writel(pring->sli.sli3.rspidx,
3424 &phba->host_gp[pring->ringno].rspGetInx);
3426 if (pring->sli.sli3.rspidx == portRspPut)
3427 portRspPut = le32_to_cpu(pgp->rspPutInx);
3430 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3431 pring->stats.iocb_rsp_full++;
3432 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3433 writel(status, phba->CAregaddr);
3434 readl(phba->CAregaddr);
3436 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3437 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3438 pring->stats.iocb_cmd_empty++;
3440 /* Force update of the local copy of cmdGetInx */
3441 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3442 lpfc_sli_resume_iocb(phba, pring);
3444 if ((pring->lpfc_sli_cmd_available))
3445 (pring->lpfc_sli_cmd_available) (phba, pring);
3449 phba->fcp_ring_in_use = 0;
3450 spin_unlock_irqrestore(&phba->hbalock, iflag);
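/*
 * Illustrative note on the get/put protocol above: the port's put index is
 * read from host memory with le32_to_cpu(), while the driver's get index is
 * published back through a register write. A hedged sketch of the drain
 * loop (names hypothetical):
 *
 *	put = le32_to_cpu(pgp->rspPutInx);	// how far the port has written
 *	while (get != put) {
 *		consume(&ring[get]);
 *		if (++get >= max)
 *			get = 0;
 *		writel(get, get_index_reg);	// tell the port it was consumed
 *		if (get == put)
 *			put = le32_to_cpu(pgp->rspPutInx);	// re-sync
 *	}
 */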
3455 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3456 * @phba: Pointer to HBA context object.
3457 * @pring: Pointer to driver SLI ring object.
3458 * @rspiocbp: Pointer to driver response IOCB object.
3460 * This function is called from the worker thread when there is a slow-path
3461 * response IOCB to process. This function chains all the response iocbs until
3462 * seeing the iocb with the LE bit set. The function will call
3463 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3464 * completion of a command iocb. The function will call the
3465 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3466 * The function frees the resources or calls the completion handler if this
3467 * iocb is an abort completion. The function returns NULL when the response
3468 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3469 * this function shall chain the iocb on to the iocb_continueq and return the
3470 * response iocb passed in.
3472 static struct lpfc_iocbq *
3473 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3474 struct lpfc_iocbq *rspiocbp)
3476 struct lpfc_iocbq *saveq;
3477 struct lpfc_iocbq *cmdiocbp;
3478 struct lpfc_iocbq *next_iocb;
3479 IOCB_t *irsp = NULL;
3480 uint32_t free_saveq;
3481 uint8_t iocb_cmd_type;
3482 lpfc_iocb_type type;
3483 unsigned long iflag;
3486 spin_lock_irqsave(&phba->hbalock, iflag);
3487 /* First add the response iocb to the continueq list */
3488 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3489 pring->iocb_continueq_cnt++;
3491 /* Now, determine whether the list is completed for processing */
3492 irsp = &rspiocbp->iocb;
3495 * By default, the driver expects to free all resources
3496 * associated with this iocb completion.
3499 saveq = list_get_first(&pring->iocb_continueq,
3500 struct lpfc_iocbq, list);
3501 irsp = &(saveq->iocb);
3502 list_del_init(&pring->iocb_continueq);
3503 pring->iocb_continueq_cnt = 0;
3505 pring->stats.iocb_rsp++;
3508 * If resource errors reported from HBA, reduce
3509 * queuedepths of the SCSI device.
3511 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3512 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3513 IOERR_NO_RESOURCES)) {
3514 spin_unlock_irqrestore(&phba->hbalock, iflag);
3515 phba->lpfc_rampdown_queue_depth(phba);
3516 spin_lock_irqsave(&phba->hbalock, iflag);
3519 if (irsp->ulpStatus) {
3520 /* Rsp ring <ringno> error: IOCB */
3521 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3522 "0328 Rsp Ring %d error: "
3527 "x%x x%x x%x x%x\n",
3529 irsp->un.ulpWord[0],
3530 irsp->un.ulpWord[1],
3531 irsp->un.ulpWord[2],
3532 irsp->un.ulpWord[3],
3533 irsp->un.ulpWord[4],
3534 irsp->un.ulpWord[5],
3535 *(((uint32_t *) irsp) + 6),
3536 *(((uint32_t *) irsp) + 7),
3537 *(((uint32_t *) irsp) + 8),
3538 *(((uint32_t *) irsp) + 9),
3539 *(((uint32_t *) irsp) + 10),
3540 *(((uint32_t *) irsp) + 11),
3541 *(((uint32_t *) irsp) + 12),
3542 *(((uint32_t *) irsp) + 13),
3543 *(((uint32_t *) irsp) + 14),
3544 *(((uint32_t *) irsp) + 15));
3548 * Fetch the IOCB command type and call the correct completion
3549 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3550 * get freed back to the lpfc_iocb_list by the discovery
3551 * kernel thread.
3553 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3554 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3557 spin_unlock_irqrestore(&phba->hbalock, iflag);
3558 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3559 spin_lock_irqsave(&phba->hbalock, iflag);
3562 case LPFC_UNSOL_IOCB:
3563 spin_unlock_irqrestore(&phba->hbalock, iflag);
3564 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3565 spin_lock_irqsave(&phba->hbalock, iflag);
3570 case LPFC_ABORT_IOCB:
3572 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3573 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3576 /* Call the specified completion routine */
3577 if (cmdiocbp->iocb_cmpl) {
3578 spin_unlock_irqrestore(&phba->hbalock,
3580 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3582 spin_lock_irqsave(&phba->hbalock,
3585 __lpfc_sli_release_iocbq(phba,
3590 case LPFC_UNKNOWN_IOCB:
3591 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3592 char adaptermsg[LPFC_MAX_ADPTMSG];
3593 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3594 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3596 dev_warn(&((phba->pcidev)->dev),
3598 phba->brd_no, adaptermsg);
3600 /* Unknown IOCB command */
3601 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3602 "0335 Unknown IOCB "
3603 "command Data: x%x "
3614 list_for_each_entry_safe(rspiocbp, next_iocb,
3615 &saveq->list, list) {
3616 list_del_init(&rspiocbp->list);
3617 __lpfc_sli_release_iocbq(phba, rspiocbp);
3619 __lpfc_sli_release_iocbq(phba, saveq);
3623 spin_unlock_irqrestore(&phba->hbalock, iflag);
3628 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3629 * @phba: Pointer to HBA context object.
3630 * @pring: Pointer to driver SLI ring object.
3631 * @mask: Host attention register mask for this ring.
3633 * This routine wraps the actual slow_ring event handler, invoked through the
3634 * API jump table function pointer in the lpfc_hba struct.
3637 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3638 struct lpfc_sli_ring *pring, uint32_t mask)
3640 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3644 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3645 * @phba: Pointer to HBA context object.
3646 * @pring: Pointer to driver SLI ring object.
3647 * @mask: Host attention register mask for this ring.
3649 * This function is called from the worker thread when there is a ring event
3650 * for non-fcp rings. The caller does not hold any lock. The function will
3651 * remove each response iocb in the response ring and calls the handle
3652 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3655 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3656 struct lpfc_sli_ring *pring, uint32_t mask)
3658 struct lpfc_pgp *pgp;
3660 IOCB_t *irsp = NULL;
3661 struct lpfc_iocbq *rspiocbp = NULL;
3662 uint32_t portRspPut, portRspMax;
3663 unsigned long iflag;
3666 pgp = &phba->port_gp[pring->ringno];
3667 spin_lock_irqsave(&phba->hbalock, iflag);
3668 pring->stats.iocb_event++;
3671 * The next available response entry should never exceed the maximum
3672 * entries. If it does, treat it as an adapter hardware error.
3674 portRspMax = pring->sli.sli3.numRiocb;
3675 portRspPut = le32_to_cpu(pgp->rspPutInx);
3676 if (portRspPut >= portRspMax) {
3678 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3679 * rsp ring <portRspMax>
3681 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3682 "0303 Ring %d handler: portRspPut %d "
3683 "is bigger than rsp ring %d\n",
3684 pring->ringno, portRspPut, portRspMax);
3686 phba->link_state = LPFC_HBA_ERROR;
3687 spin_unlock_irqrestore(&phba->hbalock, iflag);
3689 phba->work_hs = HS_FFER3;
3690 lpfc_handle_eratt(phba);
3696 while (pring->sli.sli3.rspidx != portRspPut) {
3698 * Build a completion list and call the appropriate handler.
3699 * The process is to get the next available response iocb, get
3700 * a free iocb from the list, copy the response data into the
3701 * free iocb, insert to the continuation list, and update the
3702 * next response index to slim. This process makes response
3703 * iocbs in the ring available to DMA as fast as possible but
3704 * pays a penalty for a copy operation. Since the iocb is
3705 * only 32 bytes, this penalty is considered small relative to
3706 * the PCI reads for register values and a slim write. When
3707 * the ulpLe field is set, the entire Command has been received.
3710 entry = lpfc_resp_iocb(phba, pring);
3712 phba->last_completion_time = jiffies;
3713 rspiocbp = __lpfc_sli_get_iocbq(phba);
3714 if (rspiocbp == NULL) {
3715 printk(KERN_ERR "%s: out of buffers! Failing "
3716 "completion.\n", __func__);
3720 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3721 phba->iocb_rsp_size);
3722 irsp = &rspiocbp->iocb;
3724 if (++pring->sli.sli3.rspidx >= portRspMax)
3725 pring->sli.sli3.rspidx = 0;
3727 if (pring->ringno == LPFC_ELS_RING) {
3728 lpfc_debugfs_slow_ring_trc(phba,
3729 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3730 *(((uint32_t *) irsp) + 4),
3731 *(((uint32_t *) irsp) + 6),
3732 *(((uint32_t *) irsp) + 7));
3735 writel(pring->sli.sli3.rspidx,
3736 &phba->host_gp[pring->ringno].rspGetInx);
3738 spin_unlock_irqrestore(&phba->hbalock, iflag);
3739 /* Handle the response IOCB */
3740 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3741 spin_lock_irqsave(&phba->hbalock, iflag);
3744 * If the port response put pointer has not been updated, sync
3745 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3746 * response put pointer.
3748 if (pring->sli.sli3.rspidx == portRspPut) {
3749 portRspPut = le32_to_cpu(pgp->rspPutInx);
3751 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3753 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3754 /* At least one response entry has been freed */
3755 pring->stats.iocb_rsp_full++;
3756 /* SET RxRE_RSP in Chip Att register */
3757 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3758 writel(status, phba->CAregaddr);
3759 readl(phba->CAregaddr); /* flush */
3761 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3762 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3763 pring->stats.iocb_cmd_empty++;
3765 /* Force update of the local copy of cmdGetInx */
3766 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3767 lpfc_sli_resume_iocb(phba, pring);
3769 if ((pring->lpfc_sli_cmd_available))
3770 (pring->lpfc_sli_cmd_available) (phba, pring);
3774 spin_unlock_irqrestore(&phba->hbalock, iflag);
3779 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3780 * @phba: Pointer to HBA context object.
3781 * @pring: Pointer to driver SLI ring object.
3782 * @mask: Host attention register mask for this ring.
3784 * This function is called from the worker thread when there is a pending
3785 * ELS response iocb on the driver internal slow-path response iocb worker
3786 * queue. The caller does not hold any lock. The function will remove each
3787 * response iocb from the response worker queue and calls the handle
3788 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3791 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3792 struct lpfc_sli_ring *pring, uint32_t mask)
3794 struct lpfc_iocbq *irspiocbq;
3795 struct hbq_dmabuf *dmabuf;
3796 struct lpfc_cq_event *cq_event;
3797 unsigned long iflag;
3800 spin_lock_irqsave(&phba->hbalock, iflag);
3801 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3802 spin_unlock_irqrestore(&phba->hbalock, iflag);
3803 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3804 /* Get the response iocb from the head of work queue */
3805 spin_lock_irqsave(&phba->hbalock, iflag);
3806 list_remove_head(&phba->sli4_hba.sp_queue_event,
3807 cq_event, struct lpfc_cq_event, list);
3808 spin_unlock_irqrestore(&phba->hbalock, iflag);
3810 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3811 case CQE_CODE_COMPL_WQE:
3812 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3814 /* Translate ELS WCQE to response IOCBQ */
3815 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3818 lpfc_sli_sp_handle_rspiocb(phba, pring,
3822 case CQE_CODE_RECEIVE:
3823 case CQE_CODE_RECEIVE_V1:
3824 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3826 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3833 /* Limit the number of events to 64 to avoid soft lockups */
3840 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3841 * @phba: Pointer to HBA context object.
3842 * @pring: Pointer to driver SLI ring object.
3844 * This function aborts all iocbs in the given ring and frees all the iocb
3845 * objects in txq. This function issues an abort iocb for all the iocb commands
3846 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3847 * the return of this function. The caller is not required to hold any locks.
3850 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3852 LIST_HEAD(completions);
3853 struct lpfc_iocbq *iocb, *next_iocb;
3855 if (pring->ringno == LPFC_ELS_RING) {
3856 lpfc_fabric_abort_hba(phba);
3859 /* Error everything on txq and txcmplq
3860 * First do the txq.
3861 */
3862 if (phba->sli_rev >= LPFC_SLI_REV4) {
3863 spin_lock_irq(&pring->ring_lock);
3864 list_splice_init(&pring->txq, &completions);
3866 spin_unlock_irq(&pring->ring_lock);
3868 spin_lock_irq(&phba->hbalock);
3869 /* Next issue ABTS for everything on the txcmplq */
3870 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3871 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3872 spin_unlock_irq(&phba->hbalock);
3874 spin_lock_irq(&phba->hbalock);
3875 list_splice_init(&pring->txq, &completions);
3878 /* Next issue ABTS for everything on the txcmplq */
3879 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3880 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3881 spin_unlock_irq(&phba->hbalock);
3884 /* Cancel all the IOCBs from the completions list */
3885 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3886 IOERR_SLI_ABORTED);
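/*
 * Illustrative note on the two queues handled above: iocbs still on txq were
 * never handed to the hardware and can simply be failed locally, while iocbs
 * on txcmplq are owned by the port and must be aborted via ABTS and completed
 * later. A hedged outline:
 *
 *	splice txq onto completions;		// never sent: fail now
 *	for each iocb on txcmplq:
 *		issue abort iotag;		// sent: ask the port to abort
 *	cancel(completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */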
3890 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3891 * @phba: Pointer to HBA context object.
3892 * @pring: Pointer to driver SLI ring object.
3894 * This function aborts all iocbs in the given ring and frees all the iocb
3895 * objects in txq. This function issues an abort iocb for all the iocb commands
3896 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3897 * the return of this function. The caller is not required to hold any locks.
3900 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3902 LIST_HEAD(completions);
3903 struct lpfc_iocbq *iocb, *next_iocb;
3905 if (pring->ringno == LPFC_ELS_RING)
3906 lpfc_fabric_abort_hba(phba);
3908 spin_lock_irq(&phba->hbalock);
3909 /* Next issue ABTS for everything on the txcmplq */
3910 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3911 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3912 spin_unlock_irq(&phba->hbalock);
3917 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3918 * @phba: Pointer to HBA context object.
3921 * This function aborts all iocbs in the FCP rings and frees all the iocb
3922 * objects in txq. This function issues an abort iocb for all the iocb commands
3923 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3924 * this function returns. The caller is not required to hold any locks.
3927 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3929 struct lpfc_sli *psli = &phba->sli;
3930 struct lpfc_sli_ring *pring;
3933 /* Look on all the FCP Rings for the iotag */
3934 if (phba->sli_rev >= LPFC_SLI_REV4) {
3935 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3936 pring = phba->sli4_hba.fcp_wq[i]->pring;
3937 lpfc_sli_abort_iocb_ring(phba, pring);
3940 pring = &psli->sli3_ring[LPFC_FCP_RING];
3941 lpfc_sli_abort_iocb_ring(phba, pring);
3946 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3947 * @phba: Pointer to HBA context object.
3949 * This function aborts all wqes in the NVME rings. It issues an abort
3950 * wqe for every outstanding IO command on the txcmplq. The wqes in the
3951 * txcmplq are not guaranteed to complete before this function returns.
3952 * The caller is not required to hold any locks.
3955 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3957 struct lpfc_sli_ring *pring;
3960 if (phba->sli_rev < LPFC_SLI_REV4)
3963 /* Abort all IO on each NVME ring. */
3964 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3965 pring = phba->sli4_hba.nvme_wq[i]->pring;
3966 lpfc_sli_abort_wqe_ring(phba, pring);
3972 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3973 * @phba: Pointer to HBA context object.
3975 * This function flushes all iocbs in the FCP rings and frees all the iocb
3976 * objects in txq and txcmplq. This function does not issue abort iocbs
3977 * for the iocb commands in txcmplq; they are simply completed with
3978 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3979 * slot has been permanently disabled.
3982 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3986 struct lpfc_sli *psli = &phba->sli;
3987 struct lpfc_sli_ring *pring;
3989 struct lpfc_iocbq *piocb, *next_iocb;
3991 spin_lock_irq(&phba->hbalock);
3992 /* Indicate the I/O queues are flushed */
3993 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3994 spin_unlock_irq(&phba->hbalock);
3996 /* Look on all the FCP Rings for the iotag */
3997 if (phba->sli_rev >= LPFC_SLI_REV4) {
3998 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3999 pring = phba->sli4_hba.fcp_wq[i]->pring;
4001 spin_lock_irq(&pring->ring_lock);
4002 /* Retrieve everything on txq */
4003 list_splice_init(&pring->txq, &txq);
4004 list_for_each_entry_safe(piocb, next_iocb,
4005 &pring->txcmplq, list)
4006 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4007 /* Retrieve everything on the txcmplq */
4008 list_splice_init(&pring->txcmplq, &txcmplq);
4010 pring->txcmplq_cnt = 0;
4011 spin_unlock_irq(&pring->ring_lock);
4014 lpfc_sli_cancel_iocbs(phba, &txq,
4015 IOSTAT_LOCAL_REJECT,
4017 /* Flush the txcmplq */
4018 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4019 IOSTAT_LOCAL_REJECT,
4023 pring = &psli->sli3_ring[LPFC_FCP_RING];
4025 spin_lock_irq(&phba->hbalock);
4026 /* Retrieve everything on txq */
4027 list_splice_init(&pring->txq, &txq);
4028 list_for_each_entry_safe(piocb, next_iocb,
4029 &pring->txcmplq, list)
4030 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4031 /* Retrieve everything on the txcmplq */
4032 list_splice_init(&pring->txcmplq, &txcmplq);
4034 pring->txcmplq_cnt = 0;
4035 spin_unlock_irq(&phba->hbalock);
4038 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4040 /* Flush the txcmplq */
4041 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4047 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4048 * @phba: Pointer to HBA context object.
4050 * This function flushes all wqes in the NVME rings and frees all resources
4051 * in the txcmplq. This function does not issue abort wqes for the IO
4052 * commands in the txcmplq; they are simply completed with
4053 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4054 * slot has been permanently disabled.
4057 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4060 struct lpfc_sli_ring *pring;
4062 struct lpfc_iocbq *piocb, *next_iocb;
4064 if (phba->sli_rev < LPFC_SLI_REV4)
4067 /* Hint to other driver operations that a flush is in progress. */
4068 spin_lock_irq(&phba->hbalock);
4069 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4070 spin_unlock_irq(&phba->hbalock);
4072 /* Cycle through all NVME rings and complete each IO with
4073 * a local driver reason code. This is a flush, so no abort
4074 * exchange is sent to the firmware.
4076 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4077 pring = phba->sli4_hba.nvme_wq[i]->pring;
4079 spin_lock_irq(&pring->ring_lock);
4080 list_for_each_entry_safe(piocb, next_iocb,
4081 &pring->txcmplq, list)
4082 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4083 /* Retrieve everything on the txcmplq */
4084 list_splice_init(&pring->txcmplq, &txcmplq);
4085 pring->txcmplq_cnt = 0;
4086 spin_unlock_irq(&pring->ring_lock);
4088 /* Flush the txcmplq */
4089 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4090 IOSTAT_LOCAL_REJECT,
4096 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4097 * @phba: Pointer to HBA context object.
4098 * @mask: Bit mask to be checked.
4100 * This function reads the host status register and compares
4101 * with the provided bit mask to check if the HBA completed
4102 * the restart. This function will wait in a loop for the
4103 * HBA to complete restart. If the HBA does not restart within
4104 * 15 iterations, the function will reset the HBA again. The
4105 * function returns 1 when the HBA fails to restart; otherwise it
4106 * returns zero.
4109 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4115 /* Read the HBA Host Status Register */
4116 if (lpfc_readl(phba->HSregaddr, &status))
4120 * Check the status register every 100ms for 5 retries, then every
4121 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4122 * check every 2.5 sec for 4 more.
4123 * Break out of the loop if errors occurred during init.
4125 while (((status & mask) != mask) &&
4126 !(status & HS_FFERM) &&
4138 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4139 lpfc_sli_brdrestart(phba);
4141 /* Read the HBA Host Status Register */
4142 if (lpfc_readl(phba->HSregaddr, &status)) {
4148 /* Check to see if any errors occurred during init */
4149 if ((status & HS_FFERM) || (i >= 20)) {
4150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4151 "2751 Adapter failed to restart, "
4152 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4154 readl(phba->MBslimaddr + 0xa8),
4155 readl(phba->MBslimaddr + 0xac));
4156 phba->link_state = LPFC_HBA_ERROR;
4164 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4165 * @phba: Pointer to HBA context object.
4166 * @mask: Bit mask to be checked.
4168 * This function checks the host status register to see if the HBA is
4169 * ready. This function will wait in a loop for the HBA to become ready.
4170 * If the HBA is not ready, the function will reset the HBA PCI
4171 * function again. The function returns 1 when the HBA fails to become
4172 * ready; otherwise it returns zero.
4175 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4180 /* Read the HBA Host Status Register */
4181 status = lpfc_sli4_post_status_check(phba);
4184 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4185 lpfc_sli_brdrestart(phba);
4186 status = lpfc_sli4_post_status_check(phba);
4189 /* Check to see if any errors occurred during init */
4191 phba->link_state = LPFC_HBA_ERROR;
4194 phba->sli4_hba.intr_enable = 0;
4200 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4201 * @phba: Pointer to HBA context object.
4202 * @mask: Bit mask to be checked.
4204 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4205 * through the API jump table function pointer in the lpfc_hba struct.
4208 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4210 return phba->lpfc_sli_brdready(phba, mask);
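/*
 * Illustrative sketch only (not part of the driver): the SLI-rev-specific
 * routine above is bound once during driver init, conceptually as below.
 * The exact init-path shape is an assumption for illustration.
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
 *	else
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
 */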
4213 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4216 * lpfc_reset_barrier - Make HBA ready for HBA reset
4217 * @phba: Pointer to HBA context object.
4219 * This function is called with hbalock held before resetting an HBA. It
4220 * requests the HBA to quiesce DMAs before the reset.
4222 void lpfc_reset_barrier(struct lpfc_hba *phba)
4224 uint32_t __iomem *resp_buf;
4225 uint32_t __iomem *mbox_buf;
4226 volatile uint32_t mbox;
4227 uint32_t hc_copy, ha_copy, resp_data;
4231 lockdep_assert_held(&phba->hbalock);
4233 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4234 if (hdrtype != 0x80 ||
4235 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4236 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4240 * Tell the other part of the chip to suspend temporarily all
4241 * its DMA activity.
4243 resp_buf = phba->MBslimaddr;
4245 /* Disable the error attention */
4246 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4248 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4249 readl(phba->HCregaddr); /* flush */
4250 phba->link_flag |= LS_IGNORE_ERATT;
4252 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4254 if (ha_copy & HA_ERATT) {
4255 /* Clear Chip error bit */
4256 writel(HA_ERATT, phba->HAregaddr);
4257 phba->pport->stopped = 1;
4261 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4262 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4264 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4265 mbox_buf = phba->MBslimaddr;
4266 writel(mbox, mbox_buf);
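/*
 * The chip acknowledges the quiesce request by writing back the one's
 * complement of the test pattern into SLIM word 1; poll for that
 * acknowledgment below.
 */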
4268 for (i = 0; i < 50; i++) {
4269 if (lpfc_readl((resp_buf + 1), &resp_data))
4271 if (resp_data != ~(BARRIER_TEST_PATTERN))
4277 if (lpfc_readl((resp_buf + 1), &resp_data))
4279 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4280 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4281 phba->pport->stopped)
4287 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4289 for (i = 0; i < 500; i++) {
4290 if (lpfc_readl(resp_buf, &resp_data))
4292 if (resp_data != mbox)
4301 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4303 if (!(ha_copy & HA_ERATT))
4309 if (readl(phba->HAregaddr) & HA_ERATT) {
4310 writel(HA_ERATT, phba->HAregaddr);
4311 phba->pport->stopped = 1;
4315 phba->link_flag &= ~LS_IGNORE_ERATT;
4316 writel(hc_copy, phba->HCregaddr);
4317 readl(phba->HCregaddr); /* flush */
4321 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4322 * @phba: Pointer to HBA context object.
4324 * This function issues a kill_board mailbox command and waits for
4325 * the error attention interrupt. This function is called for stopping
4326 * the firmware processing. The caller is not required to hold any
4327 * locks. This function calls lpfc_hba_down_post function to free
4328 * any pending commands after the kill. The function returns 1 when it
4329 * fails to kill the board; otherwise it returns 0.
4332 lpfc_sli_brdkill(struct lpfc_hba *phba)
4334 struct lpfc_sli *psli;
4344 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4345 "0329 Kill HBA Data: x%x x%x\n",
4346 phba->pport->port_state, psli->sli_flag);
4348 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4352 /* Disable the error attention */
4353 spin_lock_irq(&phba->hbalock);
4354 if (lpfc_readl(phba->HCregaddr, &status)) {
4355 spin_unlock_irq(&phba->hbalock);
4356 mempool_free(pmb, phba->mbox_mem_pool);
4359 status &= ~HC_ERINT_ENA;
4360 writel(status, phba->HCregaddr);
4361 readl(phba->HCregaddr); /* flush */
4362 phba->link_flag |= LS_IGNORE_ERATT;
4363 spin_unlock_irq(&phba->hbalock);
4365 lpfc_kill_board(phba, pmb);
4366 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4367 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4369 if (retval != MBX_SUCCESS) {
4370 if (retval != MBX_BUSY)
4371 mempool_free(pmb, phba->mbox_mem_pool);
4372 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4373 "2752 KILL_BOARD command failed retval %d\n",
4375 spin_lock_irq(&phba->hbalock);
4376 phba->link_flag &= ~LS_IGNORE_ERATT;
4377 spin_unlock_irq(&phba->hbalock);
4381 spin_lock_irq(&phba->hbalock);
4382 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4383 spin_unlock_irq(&phba->hbalock);
4385 mempool_free(pmb, phba->mbox_mem_pool);
4387 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4388 * attention every 100ms for 3 seconds. If we don't get ERATT after
4389 * 3 seconds we still set HBA_ERROR state because the status of the
4390 * board is now undefined.
4392 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4394 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4396 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4400 del_timer_sync(&psli->mbox_tmo);
4401 if (ha_copy & HA_ERATT) {
4402 writel(HA_ERATT, phba->HAregaddr);
4403 phba->pport->stopped = 1;
4405 spin_lock_irq(&phba->hbalock);
4406 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4407 psli->mbox_active = NULL;
4408 phba->link_flag &= ~LS_IGNORE_ERATT;
4409 spin_unlock_irq(&phba->hbalock);
4411 lpfc_hba_down_post(phba);
4412 phba->link_state = LPFC_HBA_ERROR;
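/* An ERATT observed here means the kill actually took effect */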
4414 return ha_copy & HA_ERATT ? 0 : 1;
4418 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4419 * @phba: Pointer to HBA context object.
4421 * This function resets the HBA by writing HC_INITFF to the control
4422 * register. After the HBA resets, this function resets all the iocb ring
4423 * indices. This function disables PCI layer parity checking during
4424 * the reset.
4425 * This function returns 0 always.
4426 * The caller is not required to hold any locks.
4429 lpfc_sli_brdreset(struct lpfc_hba *phba)
4431 struct lpfc_sli *psli;
4432 struct lpfc_sli_ring *pring;
4439 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4440 "0325 Reset HBA Data: x%x x%x\n",
4441 (phba->pport) ? phba->pport->port_state : 0,
4444 /* perform board reset */
4445 phba->fc_eventTag = 0;
4446 phba->link_events = 0;
4448 phba->pport->fc_myDID = 0;
4449 phba->pport->fc_prevDID = 0;
4452 /* Turn off parity checking and serr during the physical reset */
4453 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4454 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4456 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4458 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4460 /* Now toggle INITFF bit in the Host Control Register */
4461 writel(HC_INITFF, phba->HCregaddr);
4463 readl(phba->HCregaddr); /* flush */
4464 writel(0, phba->HCregaddr);
4465 readl(phba->HCregaddr); /* flush */
4467 /* Restore PCI cmd register */
4468 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4470 /* Initialize relevant SLI info */
4471 for (i = 0; i < psli->num_rings; i++) {
4472 pring = &psli->sli3_ring[i];
4474 pring->sli.sli3.rspidx = 0;
4475 pring->sli.sli3.next_cmdidx = 0;
4476 pring->sli.sli3.local_getidx = 0;
4477 pring->sli.sli3.cmdidx = 0;
4478 pring->missbufcnt = 0;
4481 phba->link_state = LPFC_WARM_START;
4486 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4487 * @phba: Pointer to HBA context object.
4489 * This function resets a SLI4 HBA. This function disables PCI layer parity
4490 * checking while it resets the device. The caller is not required to hold
4491 * any locks.
4493 * This function returns 0 always.
4496 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4498 struct lpfc_sli *psli = &phba->sli;
4503 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4504 "0295 Reset HBA Data: x%x x%x x%x\n",
4505 phba->pport->port_state, psli->sli_flag,
4508 /* perform board reset */
4509 phba->fc_eventTag = 0;
4510 phba->link_events = 0;
4511 phba->pport->fc_myDID = 0;
4512 phba->pport->fc_prevDID = 0;
4514 spin_lock_irq(&phba->hbalock);
4515 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4516 phba->fcf.fcf_flag = 0;
4517 spin_unlock_irq(&phba->hbalock);
4519 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4520 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4521 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4525 /* Now physically reset the device */
4526 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4527 "0389 Performing PCI function reset!\n");
4529 /* Turn off parity checking and serr during the physical reset */
4530 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4531 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4532 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4534 /* Perform FCoE PCI function reset before freeing queue memory */
4535 rc = lpfc_pci_function_reset(phba);
4537 /* Restore PCI cmd register */
4538 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4544 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4545 * @phba: Pointer to HBA context object.
4547 * This function is called in the SLI initialization code path to
4548 * restart the HBA. The caller is not required to hold any lock.
4549 * This function writes MBX_RESTART mailbox command to the SLIM and
4550 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4551 * function to free any pending commands. The function enables
4552 * POST only during the first initialization. The function returns zero.
4553 * The function does not guarantee completion of MBX_RESTART mailbox
4554 * command before the return of this function.
4557 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4560 struct lpfc_sli *psli;
4561 volatile uint32_t word0;
4562 void __iomem *to_slim;
4563 uint32_t hba_aer_enabled;
4565 spin_lock_irq(&phba->hbalock);
4567 /* Take PCIe device Advanced Error Reporting (AER) state */
4568 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4573 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4574 "0337 Restart HBA Data: x%x x%x\n",
4575 (phba->pport) ? phba->pport->port_state : 0,
4579 mb = (MAILBOX_t *) &word0;
4580 mb->mbxCommand = MBX_RESTART;
4583 lpfc_reset_barrier(phba);
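/*
 * MBX_RESTART is written straight into SLIM below rather than queued
 * through the normal mailbox path; as the header comment notes, no
 * completion is guaranteed for it.
 */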
4585 to_slim = phba->MBslimaddr;
4586 writel(*(uint32_t *) mb, to_slim);
4587 readl(to_slim); /* flush */
4589 /* Only skip post after fc_ffinit is completed */
4590 if (phba->pport && phba->pport->port_state)
4591 word0 = 1; /* This is really setting up word1 */
4593 word0 = 0; /* This is really setting up word1 */
4594 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4595 writel(*(uint32_t *) mb, to_slim);
4596 readl(to_slim); /* flush */
4598 lpfc_sli_brdreset(phba);
4600 phba->pport->stopped = 0;
4601 phba->link_state = LPFC_INIT_START;
4603 spin_unlock_irq(&phba->hbalock);
4605 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4606 psli->stats_start = ktime_get_seconds();
4608 /* Give the INITFF and Post time to settle. */
4611 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4612 if (hba_aer_enabled)
4613 pci_disable_pcie_error_reporting(phba->pcidev);
4615 lpfc_hba_down_post(phba);
4621 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4622 * @phba: Pointer to HBA context object.
4624 * This function is called in the SLI initialization code path to restart
4625 * a SLI4 HBA. The caller is not required to hold any lock.
4626 * At the end of the function, it calls lpfc_hba_down_post function to
4627 * free any pending commands.
4630 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4632 struct lpfc_sli *psli = &phba->sli;
4633 uint32_t hba_aer_enabled;
4637 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4638 "0296 Restart HBA Data: x%x x%x\n",
4639 phba->pport->port_state, psli->sli_flag);
4641 /* Take PCIe device Advanced Error Reporting (AER) state */
4642 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4644 rc = lpfc_sli4_brdreset(phba);
4648 spin_lock_irq(&phba->hbalock);
4649 phba->pport->stopped = 0;
4650 phba->link_state = LPFC_INIT_START;
4652 spin_unlock_irq(&phba->hbalock);
4654 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4655 psli->stats_start = ktime_get_seconds();
4657 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4658 if (hba_aer_enabled)
4659 pci_disable_pcie_error_reporting(phba->pcidev);
4661 lpfc_hba_down_post(phba);
4662 lpfc_sli4_queue_destroy(phba);
4668 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4669 * @phba: Pointer to HBA context object.
4671 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4672 * API jump table function pointer from the lpfc_hba struct.
4675 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4677 return phba->lpfc_sli_brdrestart(phba);
4681 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4682 * @phba: Pointer to HBA context object.
4684 * This function is called after an HBA restart to wait for successful
4685 * restart of the HBA. Successful restart of the HBA is indicated by the
4686 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4687 * iterations, the function will restart the HBA again. The function returns
4688 * zero if the HBA successfully restarted, else it returns a negative error code.
4691 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4693 uint32_t status, i = 0;
4695 /* Read the HBA Host Status Register */
4696 if (lpfc_readl(phba->HSregaddr, &status))
4699 /* Check status register to see what current state is */
4701 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4703 /* Check every 10ms for 10 retries, then every 100ms for 90
4704 * retries, then every 1 sec for 50 retries, for a total of
4705 * ~60 seconds, before resetting the board again and checking
4706 * every 1 sec for 50 more retries. Up to 60 seconds must be
4707 * allowed before the board is ready so that Falcon FIPS
4708 * zeroization can complete; any board reset in between restarts
4709 * the zeroization and further delays board readiness.
4712 /* Adapter failed to init, timeout, status reg
4713 <status> */
4714 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4715 "0436 Adapter failed to init, "
4716 "timeout, status reg x%x, "
4717 "FW Data: A8 x%x AC x%x\n", status,
4718 readl(phba->MBslimaddr + 0xa8),
4719 readl(phba->MBslimaddr + 0xac));
4720 phba->link_state = LPFC_HBA_ERROR;
4724 /* Check to see if any errors occurred during init */
4725 if (status & HS_FFERM) {
4726 /* ERROR: During chipset initialization */
4727 /* Adapter failed to init, chipset, status reg
4728 <status> */
4729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4730 "0437 Adapter failed to init, "
4731 "chipset, status reg x%x, "
4732 "FW Data: A8 x%x AC x%x\n", status,
4733 readl(phba->MBslimaddr + 0xa8),
4734 readl(phba->MBslimaddr + 0xac));
4735 phba->link_state = LPFC_HBA_ERROR;
4748 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4749 lpfc_sli_brdrestart(phba);
4751 /* Read the HBA Host Status Register */
4752 if (lpfc_readl(phba->HSregaddr, &status))
4756 /* Check to see if any errors occurred during init */
4757 if (status & HS_FFERM) {
4758 /* ERROR: During chipset initialization */
4759 /* Adapter failed to init, chipset, status reg <status> */
4760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4761 "0438 Adapter failed to init, chipset, "
4763 "FW Data: A8 x%x AC x%x\n", status,
4764 readl(phba->MBslimaddr + 0xa8),
4765 readl(phba->MBslimaddr + 0xac));
4766 phba->link_state = LPFC_HBA_ERROR;
4770 /* Clear all interrupt enable conditions */
4771 writel(0, phba->HCregaddr);
4772 readl(phba->HCregaddr); /* flush */
4774 /* setup host attn register */
4775 writel(0xffffffff, phba->HAregaddr);
4776 readl(phba->HAregaddr); /* flush */
4781 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4783 * This function calculates and returns the number of HBQs required to be
4784 * configured.
4787 lpfc_sli_hbq_count(void)
4789 return ARRAY_SIZE(lpfc_hbq_defs);
4793 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4795 * This function adds the number of hbq entries in every HBQ to get
4796 * the total number of hbq entries required for the HBA and returns
4797 * the result.
4800 lpfc_sli_hbq_entry_count(void)
4802 int hbq_count = lpfc_sli_hbq_count();
4806 for (i = 0; i < hbq_count; ++i)
4807 count += lpfc_hbq_defs[i]->entry_count;
4812 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4814 * This function calculates the amount of memory required for all hbq entries
4815 * to be configured and returns the total memory required.
4818 lpfc_sli_hbq_size(void)
4820 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4824 * lpfc_sli_hbq_setup - configure and initialize HBQs
4825 * @phba: Pointer to HBA context object.
4827 * This function is called during the SLI initialization to configure
4828 * all the HBQs and post buffers to the HBQ. The caller is not
4829 * required to hold any locks. This function will return zero if successful
4830 * else it will return a negative error code.
4833 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4835 int hbq_count = lpfc_sli_hbq_count();
4839 uint32_t hbq_entry_index;
4841 /* Get a Mailbox buffer to setup mailbox
4842 * commands for HBA initialization
4844 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4851 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4852 phba->link_state = LPFC_INIT_MBX_CMDS;
4853 phba->hbq_in_use = 1;
4855 hbq_entry_index = 0;
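/*
 * The HBQs are laid out back to back; hbq_entry_index carries the
 * running offset of each HBQ's first entry while the per-HBQ config
 * mailbox is built below.
 */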
4856 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4857 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4858 phba->hbqs[hbqno].hbqPutIdx = 0;
4859 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4860 phba->hbqs[hbqno].entry_count =
4861 lpfc_hbq_defs[hbqno]->entry_count;
4862 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4863 hbq_entry_index, pmb);
4864 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4866 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4867 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4868 mbxStatus <status>, ring <num> */
4870 lpfc_printf_log(phba, KERN_ERR,
4871 LOG_SLI | LOG_VPORT,
4872 "1805 Adapter failed to init. "
4873 "Data: x%x x%x x%x\n",
4875 pmbox->mbxStatus, hbqno);
4877 phba->link_state = LPFC_HBA_ERROR;
4878 mempool_free(pmb, phba->mbox_mem_pool);
4882 phba->hbq_count = hbq_count;
4884 mempool_free(pmb, phba->mbox_mem_pool);
4886 /* Initially populate or replenish the HBQs */
4887 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4888 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4893 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4894 * @phba: Pointer to HBA context object.
4896 * This function is called during SLI4 initialization to configure the
4897 * ELS HBQ and post receive buffers to it. The caller is not
4898 * required to hold any locks. This function will return zero if successful,
4899 * else it will return a negative error code.
4902 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4904 phba->hbq_in_use = 1;
4905 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4906 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4907 phba->hbq_count = 1;
4908 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4909 /* Initially populate or replenish the HBQs */
4914 * lpfc_sli_config_port - Issue config port mailbox command
4915 * @phba: Pointer to HBA context object.
4916 * @sli_mode: sli mode - 2/3
4918 * This function is called by the sli initialization code path
4919 * to issue config_port mailbox command. This function restarts the
4920 * HBA firmware and issues a config_port mailbox command to configure
4921 * the SLI interface in the sli mode specified by the sli_mode
4922 * variable. The caller is not required to hold any locks.
4923 * The function returns 0 if successful, else it returns a negative
4924 * error code.
4927 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4930 uint32_t resetcount = 0, rc = 0, done = 0;
4932 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4934 phba->link_state = LPFC_HBA_ERROR;
4938 phba->sli_rev = sli_mode;
4939 while (resetcount < 2 && !done) {
4940 spin_lock_irq(&phba->hbalock);
4941 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4942 spin_unlock_irq(&phba->hbalock);
4943 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4944 lpfc_sli_brdrestart(phba);
4945 rc = lpfc_sli_chipset_init(phba);
4949 spin_lock_irq(&phba->hbalock);
4950 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4951 spin_unlock_irq(&phba->hbalock);
4954 /* Call pre CONFIG_PORT mailbox command initialization. A
4955 * value of 0 means the call was successful. Any other
4956 * nonzero value is a failure, but if ERESTART is returned,
4957 * the driver may reset the HBA and try again.
4959 rc = lpfc_config_port_prep(phba);
4960 if (rc == -ERESTART) {
4961 phba->link_state = LPFC_LINK_UNKNOWN;
4966 phba->link_state = LPFC_INIT_MBX_CMDS;
4967 lpfc_config_port(phba, pmb);
4968 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4969 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4970 LPFC_SLI3_HBQ_ENABLED |
4971 LPFC_SLI3_CRP_ENABLED |
4972 LPFC_SLI3_DSS_ENABLED);
4973 if (rc != MBX_SUCCESS) {
4974 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4975 "0442 Adapter failed to init, mbxCmd x%x "
4976 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4977 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4978 spin_lock_irq(&phba->hbalock);
4979 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4980 spin_unlock_irq(&phba->hbalock);
4983 /* Allow asynchronous mailbox command to go through */
4984 spin_lock_irq(&phba->hbalock);
4985 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4986 spin_unlock_irq(&phba->hbalock);
4989 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4990 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4991 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4992 "3110 Port did not grant ASABT\n");
4997 goto do_prep_failed;
4999 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5000 if (!pmb->u.mb.un.varCfgPort.cMA) {
5002 goto do_prep_failed;
5004 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5005 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5006 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5007 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5008 phba->max_vpi : phba->max_vports;
5012 phba->fips_level = 0;
5013 phba->fips_spec_rev = 0;
5014 if (pmb->u.mb.un.varCfgPort.gdss) {
5015 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5016 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5017 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5018 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5019 "2850 Security Crypto Active. FIPS x%d "
5021 phba->fips_level, phba->fips_spec_rev);
5023 if (pmb->u.mb.un.varCfgPort.sec_err) {
5024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5025 "2856 Config Port Security Crypto "
5027 pmb->u.mb.un.varCfgPort.sec_err);
5029 if (pmb->u.mb.un.varCfgPort.gerbm)
5030 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5031 if (pmb->u.mb.un.varCfgPort.gcrp)
5032 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5034 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5035 phba->port_gp = phba->mbox->us.s3_pgp.port;
5037 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5038 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5039 phba->cfg_enable_bg = 0;
5040 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5042 "0443 Adapter did not grant "
5047 phba->hbq_get = NULL;
5048 phba->port_gp = phba->mbox->us.s2.port;
5052 mempool_free(pmb, phba->mbox_mem_pool);
5058 * lpfc_sli_hba_setup - SLI initialization function
5059 * @phba: Pointer to HBA context object.
5061 * This function is the main SLI initialization function. This function
5062 * is called by the HBA initialization code, HBA reset code and HBA
5063 * error attention handler code. Caller is not required to hold any
5064 * locks. This function issues config_port mailbox command to configure
5065 * the SLI, setup iocb rings and HBQ rings. In the end the function
5066 * calls the config_port_post function to issue init_link mailbox
5067 * command and to start the discovery. The function will return zero
5068 * if successful, else it will return a negative error code.
5071 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5077 switch (phba->cfg_sli_mode) {
5079 if (phba->cfg_enable_npiv) {
5080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5081 "1824 NPIV enabled: Override sli_mode "
5082 "parameter (%d) to auto (0).\n",
5083 phba->cfg_sli_mode);
5092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5093 "1819 Unrecognized sli_mode parameter: %d.\n",
5094 phba->cfg_sli_mode);
5098 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5100 rc = lpfc_sli_config_port(phba, mode);
5102 if (rc && phba->cfg_sli_mode == 3)
5103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5104 "1820 Unable to select SLI-3. "
5105 "Not supported by adapter.\n");
5106 if (rc && mode != 2)
5107 rc = lpfc_sli_config_port(phba, 2);
5108 else if (rc && mode == 2)
5109 rc = lpfc_sli_config_port(phba, 3);
5111 goto lpfc_sli_hba_setup_error;
5113 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5114 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5115 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5117 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5118 "2709 This device supports "
5119 "Advanced Error Reporting (AER)\n");
5120 spin_lock_irq(&phba->hbalock);
5121 phba->hba_flag |= HBA_AER_ENABLED;
5122 spin_unlock_irq(&phba->hbalock);
5124 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5125 "2708 This device does not support "
5126 "Advanced Error Reporting (AER): %d\n",
5128 phba->cfg_aer_support = 0;
5132 if (phba->sli_rev == 3) {
5133 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5134 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5136 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5137 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5138 phba->sli3_options = 0;
5141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5142 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5143 phba->sli_rev, phba->max_vpi);
5144 rc = lpfc_sli_ring_map(phba);
5147 goto lpfc_sli_hba_setup_error;
5149 /* Initialize VPIs. */
5150 if (phba->sli_rev == LPFC_SLI_REV3) {
5152 * The VPI bitmask and physical ID array are allocated
5153 * and initialized once only - at driver load. A port
5154 * reset doesn't need to reinitialize this memory.
5156 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5157 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5158 phba->vpi_bmask = kcalloc(longs,
5159 sizeof(unsigned long),
5161 if (!phba->vpi_bmask) {
5163 goto lpfc_sli_hba_setup_error;
5166 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5169 if (!phba->vpi_ids) {
5170 kfree(phba->vpi_bmask);
5172 goto lpfc_sli_hba_setup_error;
5174 for (i = 0; i < phba->max_vpi; i++)
5175 phba->vpi_ids[i] = i;
5180 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5181 rc = lpfc_sli_hbq_setup(phba);
5183 goto lpfc_sli_hba_setup_error;
5185 spin_lock_irq(&phba->hbalock);
5186 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5187 spin_unlock_irq(&phba->hbalock);
5189 rc = lpfc_config_port_post(phba);
5191 goto lpfc_sli_hba_setup_error;
5195 lpfc_sli_hba_setup_error:
5196 phba->link_state = LPFC_HBA_ERROR;
5197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5198 "0445 Firmware initialization failed\n");
5203 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5204 * @phba: Pointer to HBA context object.
5206 * This function issues a dump mailbox command to read config region
5207 * 23, parses the records in the region, and populates driver-
5208 * specific config parameters.
5211 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5213 LPFC_MBOXQ_t *mboxq;
5214 struct lpfc_dmabuf *mp;
5215 struct lpfc_mqe *mqe;
5216 uint32_t data_length;
5219 /* Program the default value of vlan_id and fc_map */
5220 phba->valid_vlan = 0;
5221 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5222 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5223 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5225 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5229 mqe = &mboxq->u.mqe;
5230 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5232 goto out_free_mboxq;
5235 mp = (struct lpfc_dmabuf *) mboxq->context1;
5236 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5238 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5239 "(%d):2571 Mailbox cmd x%x Status x%x "
5240 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5241 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5242 "CQ: x%x x%x x%x x%x\n",
5243 mboxq->vport ? mboxq->vport->vpi : 0,
5244 bf_get(lpfc_mqe_command, mqe),
5245 bf_get(lpfc_mqe_status, mqe),
5246 mqe->un.mb_words[0], mqe->un.mb_words[1],
5247 mqe->un.mb_words[2], mqe->un.mb_words[3],
5248 mqe->un.mb_words[4], mqe->un.mb_words[5],
5249 mqe->un.mb_words[6], mqe->un.mb_words[7],
5250 mqe->un.mb_words[8], mqe->un.mb_words[9],
5251 mqe->un.mb_words[10], mqe->un.mb_words[11],
5252 mqe->un.mb_words[12], mqe->un.mb_words[13],
5253 mqe->un.mb_words[14], mqe->un.mb_words[15],
5254 mqe->un.mb_words[16], mqe->un.mb_words[50],
5256 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5257 mboxq->mcqe.trailer);
5260 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5263 goto out_free_mboxq;
5265 data_length = mqe->un.mb_words[5];
5266 if (data_length > DMP_RGN23_SIZE) {
5267 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5270 goto out_free_mboxq;
5273 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5274 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5279 mempool_free(mboxq, phba->mbox_mem_pool);
5284 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5285 * @phba: pointer to lpfc hba data structure.
5286 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5287 * @vpd: pointer to the memory to hold resulting port vpd data.
5288 * @vpd_size: On input, the number of bytes allocated to @vpd.
5289 * On output, the number of data bytes in @vpd.
5291 * This routine executes a READ_REV SLI4 mailbox command. In
5292 * addition, this routine gets the port vpd data.
5296 * -ENOMEM - could not allocate memory.
5299 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5300 uint8_t *vpd, uint32_t *vpd_size)
5304 struct lpfc_dmabuf *dmabuf;
5305 struct lpfc_mqe *mqe;
5307 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5312 * Get a DMA buffer for the vpd data resulting from the READ_REV
5313 * mailbox command.
5315 dma_size = *vpd_size;
5316 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5317 &dmabuf->phys, GFP_KERNEL);
5318 if (!dmabuf->virt) {
5324 * The SLI4 implementation of READ_REV conflicts at word1,
5325 * bits 31:16 and SLI4 adds vpd functionality not present
5326 * in SLI3. This code corrects the conflicts.
5328 lpfc_read_rev(phba, mboxq);
5329 mqe = &mboxq->u.mqe;
5330 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5331 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5332 mqe->un.read_rev.word1 &= 0x0000FFFF;
5333 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5334 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5336 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5338 dma_free_coherent(&phba->pcidev->dev, dma_size,
5339 dmabuf->virt, dmabuf->phys);
5345 * The available vpd length cannot be bigger than the
5346 * DMA buffer passed to the port. Catch the less than
5347 * case and update the caller's size.
5349 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5350 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5352 memcpy(vpd, dmabuf->virt, *vpd_size);
5354 dma_free_coherent(&phba->pcidev->dev, dma_size,
5355 dmabuf->virt, dmabuf->phys);
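/*
 * Illustrative sketch only: a caller passes the allocated buffer size in
 * and reads the actual vpd byte count back out. Local names here are
 * assumptions for illustration.
 *
 *	uint8_t vpd[1024];
 *	uint32_t vpd_size = sizeof(vpd);
 *
 *	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 *	// on success, vpd_size now holds the number of valid bytes
 */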
5361 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5362 * @phba: pointer to lpfc hba data structure.
5364 * This routine retrieves the SLI4 device physical port name that this
5365 * PCI function is attached to.
5369 * otherwise - failed to retrieve physical port name
5372 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5374 LPFC_MBOXQ_t *mboxq;
5375 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5376 struct lpfc_controller_attribute *cntl_attr;
5377 struct lpfc_mbx_get_port_name *get_port_name;
5378 void *virtaddr = NULL;
5379 uint32_t alloclen, reqlen;
5380 uint32_t shdr_status, shdr_add_status;
5381 union lpfc_sli4_cfg_shdr *shdr;
5382 char cport_name = 0;
5385 /* We assume nothing at this point */
5386 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5387 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5389 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5392 /* obtain link type and link number via READ_CONFIG */
5393 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5394 lpfc_sli4_read_config(phba);
5395 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5396 goto retrieve_ppname;
5398 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5399 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5400 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5401 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5402 LPFC_SLI4_MBX_NEMBED);
5403 if (alloclen < reqlen) {
5404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5405 "3084 Allocated DMA memory size (%d) is "
5406 "less than the requested DMA memory size "
5407 "(%d)\n", alloclen, reqlen);
5409 goto out_free_mboxq;
5411 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5412 virtaddr = mboxq->sge_array->addr[0];
5413 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5414 shdr = &mbx_cntl_attr->cfg_shdr;
5415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5417 if (shdr_status || shdr_add_status || rc) {
5418 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5419 "3085 Mailbox x%x (x%x/x%x) failed, "
5420 "rc:x%x, status:x%x, add_status:x%x\n",
5421 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5422 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5423 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5424 rc, shdr_status, shdr_add_status);
5426 goto out_free_mboxq;
5428 cntl_attr = &mbx_cntl_attr->cntl_attr;
5429 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5430 phba->sli4_hba.lnk_info.lnk_tp =
5431 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5432 phba->sli4_hba.lnk_info.lnk_no =
5433 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5434 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5435 "3086 lnk_type:%d, lnk_numb:%d\n",
5436 phba->sli4_hba.lnk_info.lnk_tp,
5437 phba->sli4_hba.lnk_info.lnk_no);
5440 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5441 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5442 sizeof(struct lpfc_mbx_get_port_name) -
5443 sizeof(struct lpfc_sli4_cfg_mhdr),
5444 LPFC_SLI4_MBX_EMBED);
5445 get_port_name = &mboxq->u.mqe.un.get_port_name;
5446 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5447 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5448 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5449 phba->sli4_hba.lnk_info.lnk_tp);
5450 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5451 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5452 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5453 if (shdr_status || shdr_add_status || rc) {
5454 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5455 "3087 Mailbox x%x (x%x/x%x) failed: "
5456 "rc:x%x, status:x%x, add_status:x%x\n",
5457 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5458 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5459 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5460 rc, shdr_status, shdr_add_status);
5462 goto out_free_mboxq;
5464 switch (phba->sli4_hba.lnk_info.lnk_no) {
5465 case LPFC_LINK_NUMBER_0:
5466 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5467 &get_port_name->u.response);
5468 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5470 case LPFC_LINK_NUMBER_1:
5471 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5472 &get_port_name->u.response);
5473 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5475 case LPFC_LINK_NUMBER_2:
5476 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5477 &get_port_name->u.response);
5478 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5480 case LPFC_LINK_NUMBER_3:
5481 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5482 &get_port_name->u.response);
5483 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5489 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5490 phba->Port[0] = cport_name;
5491 phba->Port[1] = '\0';
5492 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5493 "3091 SLI get port name: %s\n", phba->Port);
5497 if (rc != MBX_TIMEOUT) {
5498 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5499 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5501 mempool_free(mboxq, phba->mbox_mem_pool);
5507 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5508 * @phba: pointer to lpfc hba data structure.
5510 * This routine is called to explicitly arm the SLI4 device's completion and
5511 * event queues.
5514 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5517 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5519 sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
5520 sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
5521 if (sli4_hba->nvmels_cq)
5522 sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
5525 if (sli4_hba->fcp_cq)
5526 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5527 sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
5530 if (sli4_hba->nvme_cq)
5531 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5532 sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
5536 sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
5538 if (sli4_hba->hba_eq)
5539 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5540 sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
5543 if (phba->nvmet_support) {
5544 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5545 sli4_hba->sli4_cq_release(
5546 sli4_hba->nvmet_cqset[qidx],
5552 sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
5556 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5557 * @phba: Pointer to HBA context object.
5558 * @type: The resource extent type.
5559 * @extnt_count: buffer to hold port available extent count.
5560 * @extnt_size: buffer to hold element count per extent.
5562 * This function calls the port and retrieves the number of available
5563 * extents and their size for a particular extent type.
5565 * Returns: 0 if successful. Nonzero otherwise.
5568 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5569 uint16_t *extnt_count, uint16_t *extnt_size)
5574 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5577 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5581 /* Find out how many extents are available for this resource type */
5582 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5583 sizeof(struct lpfc_sli4_cfg_mhdr));
5584 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5585 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5586 length, LPFC_SLI4_MBX_EMBED);
5588 /* Send an extents count of 0 - the GET doesn't use it. */
5589 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5590 LPFC_SLI4_MBX_EMBED);
5596 if (!phba->sli4_hba.intr_enable)
5597 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5599 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5600 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5607 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5608 if (bf_get(lpfc_mbox_hdr_status,
5609 &rsrc_info->header.cfg_shdr.response)) {
5610 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5611 "2930 Failed to get resource extents "
5612 "Status 0x%x Add'l Status 0x%x\n",
5613 bf_get(lpfc_mbox_hdr_status,
5614 &rsrc_info->header.cfg_shdr.response),
5615 bf_get(lpfc_mbox_hdr_add_status,
5616 &rsrc_info->header.cfg_shdr.response));
5621 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5623 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5626 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5627 "3162 Retrieved extents type-%d from port: count:%d, "
5628 "size:%d\n", type, *extnt_count, *extnt_size);
5631 mempool_free(mbox, phba->mbox_mem_pool);
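/*
 * Illustrative sketch only: probing the available XRI extents, with
 * local variable names assumed for illustration.
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size))
 *		// ext_cnt extents of ext_size XRIs each are available
 */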
5636 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5637 * @phba: Pointer to HBA context object.
5638 * @type: The extent type to check.
5640 * This function reads the current available extents from the port and checks
5641 * if the extent count or extent size has changed since the last access.
5642 * Callers use this routine after a port reset to determine if there is an
5643 * extent reprovisioning requirement.
5646 * -Error: error indicates problem.
5647 * 1: Extent count or size has changed.
5648 * 0: No changes.
5651 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5653 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5654 uint16_t size_diff, rsrc_ext_size;
5656 struct lpfc_rsrc_blks *rsrc_entry;
5657 struct list_head *rsrc_blk_list = NULL;
5661 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5668 case LPFC_RSC_TYPE_FCOE_RPI:
5669 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5671 case LPFC_RSC_TYPE_FCOE_VPI:
5672 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5674 case LPFC_RSC_TYPE_FCOE_XRI:
5675 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5677 case LPFC_RSC_TYPE_FCOE_VFI:
5678 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5684 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5686 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5690 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5697 * lpfc_sli4_cfg_post_extnts - Post the SLI4 extents allocation request
5698 * @phba: Pointer to HBA context object.
5699 * @extnt_cnt: number of available extents.
5700 * @type: the extent type (rpi, xri, vfi, vpi).
5701 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5702 * @mbox: pointer to the caller's allocated mailbox structure.
5704 * This function executes the extents allocation request. It also
5705 * takes care of the amount of memory needed to allocate or get the
5706 * allocated extents. It is the caller's responsibility to evaluate
5707 * the response.
5710 * -Error: Error value describes the condition found.
5714 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5715 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5720 uint32_t alloc_len, mbox_tmo;
5722 /* Calculate the total requested length of the dma memory */
5723 req_len = extnt_cnt * sizeof(uint16_t);
5726 * Calculate the size of an embedded mailbox. The uint32_t
5727 * accounts for the extents-specific word.
5729 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5733 * Presume the allocation and response will fit into an embedded
5734 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5736 *emb = LPFC_SLI4_MBX_EMBED;
5737 if (req_len > emb_len) {
5738 req_len = extnt_cnt * sizeof(uint16_t) +
5739 sizeof(union lpfc_sli4_cfg_shdr) +
5741 *emb = LPFC_SLI4_MBX_NEMBED;
5744 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5745 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5747 if (alloc_len < req_len) {
5748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5749 "2982 Allocated DMA memory size (x%x) is "
5750 "less than the requested DMA memory "
5751 "size (x%x)\n", alloc_len, req_len);
5754 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5758 if (!phba->sli4_hba.intr_enable)
5759 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5761 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5762 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5771 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5772 * @phba: Pointer to HBA context object.
5773 * @type: The resource extent type to allocate.
5775 * This function allocates the number of elements for the specified
5776 * resource type.
5779 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5782 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5783 uint16_t rsrc_id, rsrc_start, j, k;
5786 unsigned long longs;
5787 unsigned long *bmask;
5788 struct lpfc_rsrc_blks *rsrc_blks;
5791 struct lpfc_id_range *id_array = NULL;
5792 void *virtaddr = NULL;
5793 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5794 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5795 struct list_head *ext_blk_list;
5797 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5803 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5804 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5805 "3009 No available Resource Extents "
5806 "for resource type 0x%x: Count: 0x%x, "
5807 "Size 0x%x\n", type, rsrc_cnt,
5812 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5813 "2903 Post resource extents type-0x%x: "
5814 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5816 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5820 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5827 * Figure out where the response is located. Then get local pointers
5828 * to the response data. The port does not guarantee to respond to
5829 * all extent count requests, so update the local variable with the
5830 * count actually allocated by the port.
5832 if (emb == LPFC_SLI4_MBX_EMBED) {
5833 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5834 id_array = &rsrc_ext->u.rsp.id[0];
5835 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5837 virtaddr = mbox->sge_array->addr[0];
5838 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5839 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5840 id_array = &n_rsrc->id;
5843 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5844 rsrc_id_cnt = rsrc_cnt * rsrc_size;
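/*
 * Worked example: 16 extents of 64 ids each gives rsrc_id_cnt = 1024
 * ids, tracked by DIV_ROUND_UP(1024, BITS_PER_LONG) bitmap words as
 * computed above.
 */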
5847 * Based on the resource size and count, correct the base and max
5848 * resource values.
5850 length = sizeof(struct lpfc_rsrc_blks);
5852 case LPFC_RSC_TYPE_FCOE_RPI:
5853 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5854 sizeof(unsigned long),
5856 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5860 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5863 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5864 kfree(phba->sli4_hba.rpi_bmask);
5870 * The next_rpi was initialized with the maximum available
5871 * count but the port may allocate a smaller number. Catch
5872 * that case and update the next_rpi.
5874 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5876 /* Initialize local ptrs for common extent processing later. */
5877 bmask = phba->sli4_hba.rpi_bmask;
5878 ids = phba->sli4_hba.rpi_ids;
5879 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5881 case LPFC_RSC_TYPE_FCOE_VPI:
5882 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5884 if (unlikely(!phba->vpi_bmask)) {
5888 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5890 if (unlikely(!phba->vpi_ids)) {
5891 kfree(phba->vpi_bmask);
5896 /* Initialize local ptrs for common extent processing later. */
5897 bmask = phba->vpi_bmask;
5898 ids = phba->vpi_ids;
5899 ext_blk_list = &phba->lpfc_vpi_blk_list;
5901 case LPFC_RSC_TYPE_FCOE_XRI:
5902 phba->sli4_hba.xri_bmask = kcalloc(longs,
5903 sizeof(unsigned long),
5905 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5909 phba->sli4_hba.max_cfg_param.xri_used = 0;
5910 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5913 if (unlikely(!phba->sli4_hba.xri_ids)) {
5914 kfree(phba->sli4_hba.xri_bmask);
5919 /* Initialize local ptrs for common extent processing later. */
5920 bmask = phba->sli4_hba.xri_bmask;
5921 ids = phba->sli4_hba.xri_ids;
5922 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5924 case LPFC_RSC_TYPE_FCOE_VFI:
5925 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5926 sizeof(unsigned long),
5928 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5932 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5935 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5936 kfree(phba->sli4_hba.vfi_bmask);
5941 /* Initialize local ptrs for common extent processing later. */
5942 bmask = phba->sli4_hba.vfi_bmask;
5943 ids = phba->sli4_hba.vfi_ids;
5944 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5947 /* Unsupported Opcode. Fail call. */
5951 ext_blk_list = NULL;
5956 * Complete initializing the extent configuration with the
5957 * allocated ids assigned to this function. The bitmask serves
5958 * as an index into the array and manages the available ids. The
5959 * array just stores the ids communicated to the port via the wqes.
5961 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5963 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5966 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5969 rsrc_blks = kzalloc(length, GFP_KERNEL);
5970 if (unlikely(!rsrc_blks)) {
5976 rsrc_blks->rsrc_start = rsrc_id;
5977 rsrc_blks->rsrc_size = rsrc_size;
5978 list_add_tail(&rsrc_blks->list, ext_blk_list);
5979 rsrc_start = rsrc_id;
5980 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5981 phba->sli4_hba.scsi_xri_start = rsrc_start +
5982 lpfc_sli4_get_iocb_cnt(phba);
5983 phba->sli4_hba.nvme_xri_start =
5984 phba->sli4_hba.scsi_xri_start +
5985 phba->sli4_hba.scsi_xri_max;
5988 while (rsrc_id < (rsrc_start + rsrc_size)) {
5993 /* Entire word processed. Get next word. */
5998 lpfc_sli4_mbox_cmd_free(phba, mbox);
6005 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6006 * @phba: Pointer to HBA context object.
6007 * @type: the extent's type.
6009 * This function deallocates all extents of a particular resource type.
6010 * SLI4 does not allow for deallocating a particular extent range. It
6011 * is the caller's responsibility to release all kernel memory resources.
6014 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6017 uint32_t length, mbox_tmo = 0;
6019 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6020 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6022 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6027 * This function sends an embedded mailbox because it only sends the
6028 * resource type. All extents of this type are released by the
6031 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6032 sizeof(struct lpfc_sli4_cfg_mhdr));
6033 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6034 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6035 length, LPFC_SLI4_MBX_EMBED);
6037 /* Send an extents count of 0 - the dealloc doesn't use it. */
6038 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6039 LPFC_SLI4_MBX_EMBED);
6044 if (!phba->sli4_hba.intr_enable)
6045 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6047 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6048 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6055 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6056 if (bf_get(lpfc_mbox_hdr_status,
6057 &dealloc_rsrc->header.cfg_shdr.response)) {
6058 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6059 "2919 Failed to release resource extents "
6060 "for type %d - Status 0x%x Add'l Status 0x%x. "
6061 "Resource memory not released.\n",
6063 bf_get(lpfc_mbox_hdr_status,
6064 &dealloc_rsrc->header.cfg_shdr.response),
6065 bf_get(lpfc_mbox_hdr_add_status,
6066 &dealloc_rsrc->header.cfg_shdr.response));
6071 /* Release kernel memory resources for the specific type. */
6073 case LPFC_RSC_TYPE_FCOE_VPI:
6074 kfree(phba->vpi_bmask);
6075 kfree(phba->vpi_ids);
6076 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6077 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6078 &phba->lpfc_vpi_blk_list, list) {
6079 list_del_init(&rsrc_blk->list);
6082 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6084 case LPFC_RSC_TYPE_FCOE_XRI:
6085 kfree(phba->sli4_hba.xri_bmask);
6086 kfree(phba->sli4_hba.xri_ids);
6087 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6088 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6089 list_del_init(&rsrc_blk->list);
6093 case LPFC_RSC_TYPE_FCOE_VFI:
6094 kfree(phba->sli4_hba.vfi_bmask);
6095 kfree(phba->sli4_hba.vfi_ids);
6096 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6097 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6098 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6099 list_del_init(&rsrc_blk->list);
6103 case LPFC_RSC_TYPE_FCOE_RPI:
6104 /* RPI bitmask and physical id array are cleaned up earlier. */
6105 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6106 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6107 list_del_init(&rsrc_blk->list);
6115 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6118 mempool_free(mbox, phba->mbox_mem_pool);
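/*
 * Hedged usage sketch: callers that tear down the entire extent
 * configuration invoke this routine once per resource type, as
 * lpfc_sli4_dealloc_resource_identifiers() does below:
 *
 *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
 *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
 *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 */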
6123 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6128 len = sizeof(struct lpfc_mbx_set_feature) -
6129 sizeof(struct lpfc_sli4_cfg_mhdr);
6130 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6131 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6132 LPFC_SLI4_MBX_EMBED);
6135 case LPFC_SET_UE_RECOVERY:
6136 bf_set(lpfc_mbx_set_feature_UER,
6137 &mbox->u.mqe.un.set_feature, 1);
6138 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6139 mbox->u.mqe.un.set_feature.param_len = 8;
6141 case LPFC_SET_MDS_DIAGS:
6142 bf_set(lpfc_mbx_set_feature_mds,
6143 &mbox->u.mqe.un.set_feature, 1);
6144 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6145 &mbox->u.mqe.un.set_feature, 1);
6146 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6147 mbox->u.mqe.un.set_feature.param_len = 8;
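/*
 * Hedged usage sketch (mirrors the calls made from lpfc_sli4_hba_setup()):
 * build the feature mailbox, issue it by polling, then check the status:
 *
 *	lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		phba->hba_flag |= HBA_RECOVERABLE_UE;
 */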
6155 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6156 * @phba: Pointer to HBA context object.
6158 * This function allocates all SLI4 resource identifiers.
6161 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6163 int i, rc, error = 0;
6164 uint16_t count, base;
6165 unsigned long longs;
6167 if (!phba->sli4_hba.rpi_hdrs_in_use)
6168 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6169 if (phba->sli4_hba.extents_in_use) {
6171 * The port supports resource extents. The XRI, VPI, VFI, RPI
6172 * resource extent count must be read and allocated before
6173 * provisioning the resource id arrays.
6175 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6176 LPFC_IDX_RSRC_RDY) {
6178 * Extent-based resources are set - the driver could
6179 * be in a port reset. Figure out if any corrective
6180 * actions need to be taken.
6182 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6183 LPFC_RSC_TYPE_FCOE_VFI);
6186 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6187 LPFC_RSC_TYPE_FCOE_VPI);
6190 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6191 LPFC_RSC_TYPE_FCOE_XRI);
6194 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6195 LPFC_RSC_TYPE_FCOE_RPI);
6200 * It's possible that the number of resources
6201 * provided to this port instance changed between
6202 * resets. Detect this condition and reallocate
6203 * resources. Otherwise, there is no action.
6206 lpfc_printf_log(phba, KERN_INFO,
6207 LOG_MBOX | LOG_INIT,
6208 "2931 Detected extent resource "
6209 "change. Reallocating all "
6211 rc = lpfc_sli4_dealloc_extent(phba,
6212 LPFC_RSC_TYPE_FCOE_VFI);
6213 rc = lpfc_sli4_dealloc_extent(phba,
6214 LPFC_RSC_TYPE_FCOE_VPI);
6215 rc = lpfc_sli4_dealloc_extent(phba,
6216 LPFC_RSC_TYPE_FCOE_XRI);
6217 rc = lpfc_sli4_dealloc_extent(phba,
6218 LPFC_RSC_TYPE_FCOE_RPI);
6223 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6227 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6231 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6235 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6238 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6243 * The port does not support resource extents. The XRI, VPI,
6244 * VFI, RPI resource ids were determined from READ_CONFIG.
6245 * Just allocate the bitmasks and provision the resource id
6246 * arrays. If a port reset is active, the resources don't
6247 * need any action - just exit.
6249 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6250 LPFC_IDX_RSRC_RDY) {
6251 lpfc_sli4_dealloc_resource_identifiers(phba);
6252 lpfc_sli4_remove_rpis(phba);
6255 count = phba->sli4_hba.max_cfg_param.max_rpi;
6257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6258 "3279 Invalid provisioning of "
6263 base = phba->sli4_hba.max_cfg_param.rpi_base;
6264 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
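/* Worked example: count = 100 rpis with BITS_PER_LONG = 64 gives
 * longs = (100 + 63) / 64 = 2, i.e. a 128-bit bitmask rounded up
 * from 100 bits.
 */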
6265 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6266 sizeof(unsigned long),
6268 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6272 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6274 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6276 goto free_rpi_bmask;
6279 for (i = 0; i < count; i++)
6280 phba->sli4_hba.rpi_ids[i] = base + i;
6283 count = phba->sli4_hba.max_cfg_param.max_vpi;
6285 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6286 "3280 Invalid provisioning of "
6291 base = phba->sli4_hba.max_cfg_param.vpi_base;
6292 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6293 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6295 if (unlikely(!phba->vpi_bmask)) {
6299 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6301 if (unlikely(!phba->vpi_ids)) {
6303 goto free_vpi_bmask;
6306 for (i = 0; i < count; i++)
6307 phba->vpi_ids[i] = base + i;
6310 count = phba->sli4_hba.max_cfg_param.max_xri;
6312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6313 "3281 Invalid provisioning of "
6318 base = phba->sli4_hba.max_cfg_param.xri_base;
6319 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6320 phba->sli4_hba.xri_bmask = kcalloc(longs,
6321 sizeof(unsigned long),
6323 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6327 phba->sli4_hba.max_cfg_param.xri_used = 0;
6328 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6330 if (unlikely(!phba->sli4_hba.xri_ids)) {
6332 goto free_xri_bmask;
6335 for (i = 0; i < count; i++)
6336 phba->sli4_hba.xri_ids[i] = base + i;
6339 count = phba->sli4_hba.max_cfg_param.max_vfi;
6341 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6342 "3282 Invalid provisioning of "
6347 base = phba->sli4_hba.max_cfg_param.vfi_base;
6348 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6349 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6350 sizeof(unsigned long),
6352 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6356 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6358 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6360 goto free_vfi_bmask;
6363 for (i = 0; i < count; i++)
6364 phba->sli4_hba.vfi_ids[i] = base + i;
6367 * Mark all resources ready. An HBA reset doesn't need
6368 * to redo this initialization.
6370 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6376 kfree(phba->sli4_hba.vfi_bmask);
6377 phba->sli4_hba.vfi_bmask = NULL;
6379 kfree(phba->sli4_hba.xri_ids);
6380 phba->sli4_hba.xri_ids = NULL;
6382 kfree(phba->sli4_hba.xri_bmask);
6383 phba->sli4_hba.xri_bmask = NULL;
6385 kfree(phba->vpi_ids);
6386 phba->vpi_ids = NULL;
6388 kfree(phba->vpi_bmask);
6389 phba->vpi_bmask = NULL;
6391 kfree(phba->sli4_hba.rpi_ids);
6392 phba->sli4_hba.rpi_ids = NULL;
6394 kfree(phba->sli4_hba.rpi_bmask);
6395 phba->sli4_hba.rpi_bmask = NULL;
6401 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource identifiers.
6402 * @phba: Pointer to HBA context object.
6404 * This function deallocates all resource identifiers allocated to the port.
6408 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6410 if (phba->sli4_hba.extents_in_use) {
6411 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6412 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6413 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6414 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6416 kfree(phba->vpi_bmask);
6417 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6418 kfree(phba->vpi_ids);
6419 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6420 kfree(phba->sli4_hba.xri_bmask);
6421 kfree(phba->sli4_hba.xri_ids);
6422 kfree(phba->sli4_hba.vfi_bmask);
6423 kfree(phba->sli4_hba.vfi_ids);
6424 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6425 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6432 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6433 * @phba: Pointer to HBA context object.
6434 * @type: The resource extent type.
6435 * @extnt_cnt: buffer to hold port extent count response
6436 * @extnt_size: buffer to hold port extent size response.
6438 * This function calls the port to read the host allocated extents
6439 * for a particular type.
6442 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6443 uint16_t *extnt_cnt, uint16_t *extnt_size)
6447 uint16_t curr_blks = 0;
6448 uint32_t req_len, emb_len;
6449 uint32_t alloc_len, mbox_tmo;
6450 struct list_head *blk_list_head;
6451 struct lpfc_rsrc_blks *rsrc_blk;
6453 void *virtaddr = NULL;
6454 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6455 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6456 union lpfc_sli4_cfg_shdr *shdr;
6459 case LPFC_RSC_TYPE_FCOE_VPI:
6460 blk_list_head = &phba->lpfc_vpi_blk_list;
6462 case LPFC_RSC_TYPE_FCOE_XRI:
6463 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6465 case LPFC_RSC_TYPE_FCOE_VFI:
6466 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6468 case LPFC_RSC_TYPE_FCOE_RPI:
6469 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6475 /* Count the number of extents currently allocated for this type. */
6476 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6477 if (curr_blks == 0) {
6479 * The GET_ALLOCATED mailbox does not return the size,
6480 * just the count. The size should be just the size
6481 * stored in the current allocated block and all sizes
6482 * for an extent type are the same so set the return
6485 *extnt_size = rsrc_blk->rsrc_size;
6491 * Calculate the size of an embedded mailbox. The uint32_t
6492 * accounts for the extents-specific word.
6494 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6498 * Presume the allocation and response will fit into an embedded
6499 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6501 emb = LPFC_SLI4_MBX_EMBED;
6503 if (req_len > emb_len) {
6504 req_len = curr_blks * sizeof(uint16_t) +
6505 sizeof(union lpfc_sli4_cfg_shdr) +
6507 emb = LPFC_SLI4_MBX_NEMBED;
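/*
 * Worked example (hedged): the embedded payload is bounded by
 * sizeof(MAILBOX_t); once curr_blks * sizeof(uint16_t) of extent ids no
 * longer fits in emb_len, the request length is rebuilt above for the
 * non-embedded SGE layout instead.
 */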
6510 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6513 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6515 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6516 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6518 if (alloc_len < req_len) {
6519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6520 "2983 Allocated DMA memory size (x%x) is "
6521 "less than the requested DMA memory "
6522 "size (x%x)\n", alloc_len, req_len);
6526 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6532 if (!phba->sli4_hba.intr_enable)
6533 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6535 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6536 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6545 * Figure out where the response is located. Then get local pointers
6546 * to the response data. The port does not guarantee to respond to
6547 * all extent count requests, so update the local variable with the
6548 * allocated count from the port.
6550 if (emb == LPFC_SLI4_MBX_EMBED) {
6551 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6552 shdr = &rsrc_ext->header.cfg_shdr;
6553 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6555 virtaddr = mbox->sge_array->addr[0];
6556 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6557 shdr = &n_rsrc->cfg_shdr;
6558 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6561 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6562 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6563 "2984 Failed to read allocated resources "
6564 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6566 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6567 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6572 lpfc_sli4_mbox_cmd_free(phba, mbox);
6577 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6578 * @phba: pointer to lpfc hba data structure.
6580 * @sgl_list: linked list of sgl buffers to post
6581 * @cnt: number of linked list buffers
6583 * This routine walks the list of buffers that have been allocated and
6584 * reposts them to the port by using SGL block post. This is needed after a
6585 * pci_function_reset/warm_start or start. It attempts to construct blocks
6586 * of buffer sgls that contain contiguous xris and uses the non-embedded
6587 * SGL block post mailbox commands to post them to the port. For a single
6588 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
6589 * mailbox command for posting.
6591 * Returns: 0 = success, non-zero failure.
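 *
 * Illustration (hedged example): buffers with xritags 10, 11, 12 and 14
 * yield one non-embedded block post of {10, 11, 12}; the hole before 14
 * starts a new prep list, and a lone trailing sgl is posted individually
 * via lpfc_sli4_post_sgl().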
6594 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6595 struct list_head *sgl_list, int cnt)
6597 struct lpfc_sglq *sglq_entry = NULL;
6598 struct lpfc_sglq *sglq_entry_next = NULL;
6599 struct lpfc_sglq *sglq_entry_first = NULL;
6600 int status, total_cnt;
6601 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6602 int last_xritag = NO_XRI;
6603 LIST_HEAD(prep_sgl_list);
6604 LIST_HEAD(blck_sgl_list);
6605 LIST_HEAD(allc_sgl_list);
6606 LIST_HEAD(post_sgl_list);
6607 LIST_HEAD(free_sgl_list);
6609 spin_lock_irq(&phba->hbalock);
6610 spin_lock(&phba->sli4_hba.sgl_list_lock);
6611 list_splice_init(sgl_list, &allc_sgl_list);
6612 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6613 spin_unlock_irq(&phba->hbalock);
6616 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6617 &allc_sgl_list, list) {
6618 list_del_init(&sglq_entry->list);
6620 if ((last_xritag != NO_XRI) &&
6621 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6622 /* a hole in xri block, form a sgl posting block */
6623 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6624 post_cnt = block_cnt - 1;
6625 /* prepare list for next posting block */
6626 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6629 /* prepare list for next posting block */
6630 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6631 /* enough sgls for non-embed sgl mbox command */
6632 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6633 list_splice_init(&prep_sgl_list,
6635 post_cnt = block_cnt;
6641 /* keep track of last sgl's xritag */
6642 last_xritag = sglq_entry->sli4_xritag;
6644 /* end of repost sgl list condition for buffers */
6645 if (num_posted == total_cnt) {
6646 if (post_cnt == 0) {
6647 list_splice_init(&prep_sgl_list,
6649 post_cnt = block_cnt;
6650 } else if (block_cnt == 1) {
6651 status = lpfc_sli4_post_sgl(phba,
6652 sglq_entry->phys, 0,
6653 sglq_entry->sli4_xritag);
6655 /* successful, put sgl to posted list */
6656 list_add_tail(&sglq_entry->list,
6659 /* Failure, put sgl to free list */
6660 lpfc_printf_log(phba, KERN_WARNING,
6662 "3159 Failed to post "
6663 "sgl, xritag:x%x\n",
6664 sglq_entry->sli4_xritag);
6665 list_add_tail(&sglq_entry->list,
6672 /* continue until a nembed page worth of sgls */
6676 /* post the buffer list sgls as a block */
6677 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6681 /* success, put sgl list to posted sgl list */
6682 list_splice_init(&blck_sgl_list, &post_sgl_list);
6684 /* Failure, put sgl list to free sgl list */
6685 sglq_entry_first = list_first_entry(&blck_sgl_list,
6688 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6689 "3160 Failed to post sgl-list, "
6691 sglq_entry_first->sli4_xritag,
6692 (sglq_entry_first->sli4_xritag +
6694 list_splice_init(&blck_sgl_list, &free_sgl_list);
6695 total_cnt -= post_cnt;
6698 /* don't reset xritag due to hole in xri block */
6700 last_xritag = NO_XRI;
6702 /* reset sgl post count for next round of posting */
6706 /* free the sgls that failed to post */
6707 lpfc_free_sgl_list(phba, &free_sgl_list);
6709 /* push sgls posted to the available list */
6710 if (!list_empty(&post_sgl_list)) {
6711 spin_lock_irq(&phba->hbalock);
6712 spin_lock(&phba->sli4_hba.sgl_list_lock);
6713 list_splice_init(&post_sgl_list, sgl_list);
6714 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6715 spin_unlock_irq(&phba->hbalock);
6717 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6718 "3161 Failure to post sgl to port.\n");
6722 /* return the number of XRIs actually posted */
6727 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6731 len = sizeof(struct lpfc_mbx_set_host_data) -
6732 sizeof(struct lpfc_sli4_cfg_mhdr);
6733 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6734 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6735 LPFC_SLI4_MBX_EMBED);
6737 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6738 mbox->u.mqe.un.set_host_data.param_len =
6739 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6740 snprintf(mbox->u.mqe.un.set_host_data.data,
6741 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6742 "Linux %s v"LPFC_DRIVER_VERSION,
6743 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
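/*
 * For example (hedged), an FC-mode port advertises a host-data string of
 * the form "Linux FC v<LPFC_DRIVER_VERSION>", truncated by snprintf() to
 * LPFC_HOST_OS_DRIVER_VERSION_SIZE bytes.
 */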
6747 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
6748 struct lpfc_queue *drq, int count, int idx)
6751 struct lpfc_rqe hrqe;
6752 struct lpfc_rqe drqe;
6753 struct lpfc_rqb *rqbp;
6754 unsigned long flags;
6755 struct rqb_dmabuf *rqb_buffer;
6756 LIST_HEAD(rqb_buf_list);
6759 for (i = 0; i < count; i++) {
6760 spin_lock_irqsave(&phba->hbalock, flags);
6761 /* If the RQ is already full, don't bother */
6762 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
6763 spin_unlock_irqrestore(&phba->hbalock, flags);
6766 spin_unlock_irqrestore(&phba->hbalock, flags);
6768 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
6771 rqb_buffer->hrq = hrq;
6772 rqb_buffer->drq = drq;
6773 rqb_buffer->idx = idx;
6774 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
6777 spin_lock_irqsave(&phba->hbalock, flags);
6778 while (!list_empty(&rqb_buf_list)) {
6779 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
6782 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
6783 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
6784 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
6785 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
6786 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
6788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6789 "6421 Cannot post to HRQ %d: %x %x %x "
6797 rqbp->rqb_free_buffer(phba, rqb_buffer);
6799 list_add_tail(&rqb_buffer->hbuf.list,
6800 &rqbp->rqb_buffer_list);
6801 rqbp->buffer_count++;
6804 spin_unlock_irqrestore(&phba->hbalock, flags);
6809 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6810 * @phba: Pointer to HBA context object.
6812 * This function is the main SLI4 device initialization PCI function. This
6813 * function is called by the HBA initialization code, HBA reset code and
6814 * HBA error attention handler code. Caller is not required to hold any
6818 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6821 LPFC_MBOXQ_t *mboxq;
6822 struct lpfc_mqe *mqe;
6825 uint32_t ftr_rsp = 0;
6826 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6827 struct lpfc_vport *vport = phba->pport;
6828 struct lpfc_dmabuf *mp;
6829 struct lpfc_rqb *rqbp;
6831 /* Perform a PCI function reset to start from clean */
6832 rc = lpfc_pci_function_reset(phba);
6836 /* Check the HBA Host Status Register for readiness */
6837 rc = lpfc_sli4_post_status_check(phba);
6841 spin_lock_irq(&phba->hbalock);
6842 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6843 spin_unlock_irq(&phba->hbalock);
6847 * Allocate a single mailbox container for initializing the
6850 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6854 /* Issue READ_REV to collect vpd and FW information. */
6855 vpd_size = SLI4_PAGE_SIZE;
6856 vpd = kzalloc(vpd_size, GFP_KERNEL);
6862 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6868 mqe = &mboxq->u.mqe;
6869 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6870 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6871 phba->hba_flag |= HBA_FCOE_MODE;
6872 phba->fcp_embed_io = 0; /* SLI4 FC support only */
6874 phba->hba_flag &= ~HBA_FCOE_MODE;
6877 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6879 phba->hba_flag |= HBA_FIP_SUPPORT;
6881 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6883 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6885 if (phba->sli_rev != LPFC_SLI_REV4) {
6886 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6887 "0376 READ_REV Error. SLI Level %d "
6888 "FCoE enabled %d\n",
6889 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6896 * Continue initialization with default values even if the driver failed
6897 * to read FCoE param config regions, only read parameters if the
6900 if (phba->hba_flag & HBA_FCOE_MODE &&
6901 lpfc_sli4_read_fcoe_params(phba))
6902 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6903 "2570 Failed to read FCoE parameters\n");
6906 * Retrieve the sli4 device physical port name; failure to do so
6907 * is considered non-fatal.
6909 rc = lpfc_sli4_retrieve_pport_name(phba);
6911 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6912 "3080 Successful retrieving SLI4 device "
6913 "physical port name: %s.\n", phba->Port);
6916 * Evaluate the read rev and vpd data. Populate the driver
6917 * state with the results. If this routine fails, the failure
6918 * is not fatal as the driver will use generic values.
6920 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6921 if (unlikely(!rc)) {
6922 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6923 "0377 Error %d parsing vpd. "
6924 "Using defaults.\n", rc);
6929 /* Save information as VPD data */
6930 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6931 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6934 * This is because the first G7 ASIC doesn't support the standard
6935 * 0x5a NVME cmd descriptor type/subtype
6937 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6938 LPFC_SLI_INTF_IF_TYPE_6) &&
6939 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
6940 (phba->vpd.rev.smRev == 0) &&
6941 (phba->cfg_nvme_embed_cmd == 1))
6942 phba->cfg_nvme_embed_cmd = 0;
6944 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6945 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6947 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6949 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6951 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6953 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6954 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6955 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6956 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6957 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6958 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6959 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6960 "(%d):0380 READ_REV Status x%x "
6961 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6962 mboxq->vport ? mboxq->vport->vpi : 0,
6963 bf_get(lpfc_mqe_status, mqe),
6964 phba->vpd.rev.opFwName,
6965 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6966 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6968 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6969 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6970 if (phba->pport->cfg_lun_queue_depth > rc) {
6971 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6972 "3362 LUN queue depth changed from %d to %d\n",
6973 phba->pport->cfg_lun_queue_depth, rc);
6974 phba->pport->cfg_lun_queue_depth = rc;
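/* e.g. max_xri = 1024 caps cfg_lun_queue_depth at 1024 >> 3 = 128 */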
6977 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6978 LPFC_SLI_INTF_IF_TYPE_0) {
6979 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6980 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6981 if (rc == MBX_SUCCESS) {
6982 phba->hba_flag |= HBA_RECOVERABLE_UE;
6983 /* Set 1Sec interval to detect UE */
6984 phba->eratt_poll_interval = 1;
6985 phba->sli4_hba.ue_to_sr = bf_get(
6986 lpfc_mbx_set_feature_UESR,
6987 &mboxq->u.mqe.un.set_feature);
6988 phba->sli4_hba.ue_to_rp = bf_get(
6989 lpfc_mbx_set_feature_UERP,
6990 &mboxq->u.mqe.un.set_feature);
6994 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6995 /* Enable MDS Diagnostics only if the SLI Port supports it */
6996 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6997 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6998 if (rc != MBX_SUCCESS)
6999 phba->mds_diags_support = 0;
7003 * Discover the port's supported feature set and match it against the
7006 lpfc_request_features(phba, mboxq);
7007 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7014 * The port must support FCP initiator mode as this is the
7015 * only mode running in the host.
7017 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7018 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7019 "0378 No support for fcpi mode.\n");
7023 /* Performance Hints are ONLY for FCoE */
7024 if (phba->hba_flag & HBA_FCOE_MODE) {
7025 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7026 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7028 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7032 * If the port cannot support the host's requested features
7033 * then turn off the global config parameters to disable the
7034 * feature in the driver. This is not a fatal error.
7036 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7037 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7038 phba->cfg_enable_bg = 0;
7039 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7044 if (phba->max_vpi && phba->cfg_enable_npiv &&
7045 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7049 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7050 "0379 Feature Mismatch Data: x%08x %08x "
7051 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7052 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7053 phba->cfg_enable_npiv, phba->max_vpi);
7054 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7055 phba->cfg_enable_bg = 0;
7056 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7057 phba->cfg_enable_npiv = 0;
7060 /* These SLI3 features are assumed in SLI4 */
7061 spin_lock_irq(&phba->hbalock);
7062 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7063 spin_unlock_irq(&phba->hbalock);
7066 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7067 * calls depend on these resources to complete port setup.
7069 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7071 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7072 "2920 Failed to alloc Resource IDs "
7077 lpfc_set_host_data(phba, mboxq);
7079 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7081 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7082 "2134 Failed to set host os driver version %x",
7086 /* Read the port's service parameters. */
7087 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7089 phba->link_state = LPFC_HBA_ERROR;
7094 mboxq->vport = vport;
7095 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7096 mp = (struct lpfc_dmabuf *) mboxq->context1;
7097 if (rc == MBX_SUCCESS) {
7098 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7103 * This memory was allocated by the lpfc_read_sparam routine. Release
7104 * it to the mbuf pool.
7106 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7108 mboxq->context1 = NULL;
7110 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7111 "0382 READ_SPARAM command failed "
7112 "status %d, mbxStatus x%x\n",
7113 rc, bf_get(lpfc_mqe_status, mqe));
7114 phba->link_state = LPFC_HBA_ERROR;
7119 lpfc_update_vport_wwn(vport);
7121 /* Update the fc_host data structures with new wwn. */
7122 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7123 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7125 /* Create all the SLI4 queues */
7126 rc = lpfc_sli4_queue_create(phba);
7128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7129 "3089 Failed to allocate queues\n");
7133 /* Set up all the queues to the device */
7134 rc = lpfc_sli4_queue_setup(phba);
7136 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7137 "0381 Error %d during queue setup.\n ", rc);
7138 goto out_stop_timers;
7140 /* Initialize the driver internal SLI layer lists. */
7141 lpfc_sli4_setup(phba);
7142 lpfc_sli4_queue_init(phba);
7144 /* update host els xri-sgl sizes and mappings */
7145 rc = lpfc_sli4_els_sgl_update(phba);
7147 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7148 "1400 Failed to update xri-sgl size and "
7149 "mapping: %d\n", rc);
7150 goto out_destroy_queue;
7153 /* register the els sgl pool to the port */
7154 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7155 phba->sli4_hba.els_xri_cnt);
7156 if (unlikely(rc < 0)) {
7157 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7158 "0582 Error %d during els sgl post "
7161 goto out_destroy_queue;
7163 phba->sli4_hba.els_xri_cnt = rc;
7165 if (phba->nvmet_support) {
7166 /* update host nvmet xri-sgl sizes and mappings */
7167 rc = lpfc_sli4_nvmet_sgl_update(phba);
7169 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7170 "6308 Failed to update nvmet-sgl size "
7171 "and mapping: %d\n", rc);
7172 goto out_destroy_queue;
7175 /* register the nvmet sgl pool to the port */
7176 rc = lpfc_sli4_repost_sgl_list(
7178 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7179 phba->sli4_hba.nvmet_xri_cnt);
7180 if (unlikely(rc < 0)) {
7181 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7182 "3117 Error %d during nvmet "
7185 goto out_destroy_queue;
7187 phba->sli4_hba.nvmet_xri_cnt = rc;
7189 cnt = phba->cfg_iocb_cnt * 1024;
7190 /* We need 1 iocbq for every SGL, for IO processing */
7191 cnt += phba->sli4_hba.nvmet_xri_cnt;
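/* Worked example: cfg_iocb_cnt = 2 reserves 2 * 1024 = 2048 iocbqs,
 * plus one iocbq per NVMET XRI.
 */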
7193 /* update host scsi xri-sgl sizes and mappings */
7194 rc = lpfc_sli4_scsi_sgl_update(phba);
7196 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7197 "6309 Failed to update scsi-sgl size "
7198 "and mapping: %d\n", rc);
7199 goto out_destroy_queue;
7202 /* update host nvme xri-sgl sizes and mappings */
7203 rc = lpfc_sli4_nvme_sgl_update(phba);
7205 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7206 "6082 Failed to update nvme-sgl size "
7207 "and mapping: %d\n", rc);
7208 goto out_destroy_queue;
7211 cnt = phba->cfg_iocb_cnt * 1024;
7214 if (!phba->sli.iocbq_lookup) {
7215 /* Initialize and populate the iocb list per host */
7216 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7217 "2821 initialize iocb list %d total %d\n",
7218 phba->cfg_iocb_cnt, cnt);
7219 rc = lpfc_init_iocb_list(phba, cnt);
7221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7222 "1413 Failed to init iocb list.\n");
7223 goto out_destroy_queue;
7227 if (phba->nvmet_support)
7228 lpfc_nvmet_create_targetport(phba);
7230 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7231 /* Post initial buffers to all RQs created */
7232 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7233 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7234 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7235 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7236 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7237 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7238 rqbp->buffer_count = 0;
7240 lpfc_post_rq_buffer(
7241 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7242 phba->sli4_hba.nvmet_mrq_data[i],
7243 phba->cfg_nvmet_mrq_post, i);
7247 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
7248 /* register the allocated scsi sgl pool to the port */
7249 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
7251 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7252 "0383 Error %d during scsi sgl post "
7254 /* Some Scsi buffers were moved to abort scsi list */
7255 /* A pci function reset will repost them */
7257 goto out_destroy_queue;
7261 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7262 (phba->nvmet_support == 0)) {
7264 /* register the allocated nvme sgl pool to the port */
7265 rc = lpfc_repost_nvme_sgl_list(phba);
7267 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7268 "6116 Error %d during nvme sgl post "
7270 /* Some NVME buffers were moved to abort nvme list */
7271 /* A pci function reset will repost them */
7273 goto out_destroy_queue;
7277 /* Post the rpi header region to the device. */
7278 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7280 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7281 "0393 Error %d during rpi post operation\n",
7284 goto out_free_iocblist;
7286 lpfc_sli4_node_prep(phba);
7288 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7289 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7291 * The FC Port needs to register FCFI (index 0)
7293 lpfc_reg_fcfi(phba, mboxq);
7294 mboxq->vport = phba->pport;
7295 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7296 if (rc != MBX_SUCCESS)
7297 goto out_unset_queue;
7299 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7300 &mboxq->u.mqe.un.reg_fcfi);
7302 /* We are in NVME Target mode with MRQ > 1 */
7304 /* First register the FCFI */
7305 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7306 mboxq->vport = phba->pport;
7307 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7308 if (rc != MBX_SUCCESS)
7309 goto out_unset_queue;
7311 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7312 &mboxq->u.mqe.un.reg_fcfi_mrq);
7314 /* Next register the MRQs */
7315 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7316 mboxq->vport = phba->pport;
7317 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7318 if (rc != MBX_SUCCESS)
7319 goto out_unset_queue;
7322 /* Check if the port is configured to be disabled */
7323 lpfc_sli_read_link_ste(phba);
7326 /* Arm the CQs and then EQs on device */
7327 lpfc_sli4_arm_cqeq_intr(phba);
7329 /* Indicate device interrupt mode */
7330 phba->sli4_hba.intr_enable = 1;
7332 /* Allow asynchronous mailbox command to go through */
7333 spin_lock_irq(&phba->hbalock);
7334 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7335 spin_unlock_irq(&phba->hbalock);
7337 /* Post receive buffers to the device */
7338 lpfc_sli4_rb_setup(phba);
7340 /* Reset HBA FCF states after HBA reset */
7341 phba->fcf.fcf_flag = 0;
7342 phba->fcf.current_rec.flag = 0;
7344 /* Start the ELS watchdog timer */
7345 mod_timer(&vport->els_tmofunc,
7346 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7348 /* Start heart beat timer */
7349 mod_timer(&phba->hb_tmofunc,
7350 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7351 phba->hb_outstanding = 0;
7352 phba->last_completion_time = jiffies;
7354 /* Start error attention (ERATT) polling timer */
7355 mod_timer(&phba->eratt_poll,
7356 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7358 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7359 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7360 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7363 "2829 This device supports "
7364 "Advanced Error Reporting (AER)\n");
7365 spin_lock_irq(&phba->hbalock);
7366 phba->hba_flag |= HBA_AER_ENABLED;
7367 spin_unlock_irq(&phba->hbalock);
7369 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7370 "2830 This device does not support "
7371 "Advanced Error Reporting (AER)\n");
7372 phba->cfg_aer_support = 0;
7378 * The port is ready, set the host's link state to LINK_DOWN
7379 * in preparation for link interrupts.
7381 spin_lock_irq(&phba->hbalock);
7382 phba->link_state = LPFC_LINK_DOWN;
7383 spin_unlock_irq(&phba->hbalock);
7384 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7385 (phba->hba_flag & LINK_DISABLED)) {
7386 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7387 "3103 Adapter Link is disabled.\n");
7388 lpfc_down_link(phba, mboxq);
7389 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7390 if (rc != MBX_SUCCESS) {
7391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7392 "3104 Adapter failed to issue "
7393 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7394 goto out_unset_queue;
7396 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7397 /* don't perform init_link on SLI4 FC port loopback test */
7398 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7399 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7401 goto out_unset_queue;
7404 mempool_free(mboxq, phba->mbox_mem_pool);
7407 /* Unset all the queues set up in this routine when error out */
7408 lpfc_sli4_queue_unset(phba);
7410 lpfc_free_iocb_list(phba);
7412 lpfc_sli4_queue_destroy(phba);
7414 lpfc_stop_hba_timers(phba);
7416 mempool_free(mboxq, phba->mbox_mem_pool);
7421 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7422 * @ptr: context object - pointer to hba structure.
7424 * This is the callback function for the mailbox timer. The mailbox
7425 * timer is armed when a new mailbox command is issued and the timer
7426 * is deleted when the mailbox completes. The function is called by
7427 * the kernel timer code when a mailbox does not complete within the
7428 * expected time. This function wakes up the worker thread to
7429 * process the mailbox timeout and returns. All the processing is
7430 * done by the worker thread function lpfc_mbox_timeout_handler.
7433 lpfc_mbox_timeout(struct timer_list *t)
7435 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7436 unsigned long iflag;
7437 uint32_t tmo_posted;
7439 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7440 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7442 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7443 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7446 lpfc_worker_wake_up(phba);
7451 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7453 * @phba: Pointer to HBA context object.
7455 * This function checks if any mailbox completions are present on the mailbox
7459 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7463 struct lpfc_queue *mcq;
7464 struct lpfc_mcqe *mcqe;
7465 bool pending_completions = false;
7468 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7471 /* Check for completions on mailbox completion queue */
7473 mcq = phba->sli4_hba.mbx_cq;
7474 idx = mcq->hba_index;
7475 qe_valid = mcq->qe_valid;
7476 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
7477 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7478 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7479 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7480 pending_completions = true;
7483 idx = (idx + 1) % mcq->entry_count;
7484 if (mcq->hba_index == idx)
7487 /* if the index wrapped around, toggle the valid bit */
7488 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7489 qe_valid = (qe_valid) ? 0 : 1;
7491 return pending_completions;
7496 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7498 * @phba: Pointer to HBA context object.
7500 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7501 * may be missed, causing erroneous mailbox timeouts to occur. This function
7502 * checks to see if mbox completions are on the mailbox completion queue
7503 * and will process all the completions associated with the eq for the
7504 * mailbox completion queue.
7507 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7509 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7511 struct lpfc_queue *fpeq = NULL;
7512 struct lpfc_eqe *eqe;
7515 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7518 /* Find the eq associated with the mcq */
7520 if (sli4_hba->hba_eq)
7521 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7522 if (sli4_hba->hba_eq[eqidx]->queue_id ==
7523 sli4_hba->mbx_cq->assoc_qid) {
7524 fpeq = sli4_hba->hba_eq[eqidx];
7530 /* Turn off interrupts from this EQ */
7532 sli4_hba->sli4_eq_clr_intr(fpeq);
7534 /* Check to see if a mbox completion is pending */
7536 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7539 * If a mbox completion is pending, process all the events on EQ
7540 * associated with the mbox completion queue (this could include
7541 * mailbox commands, async events, els commands, receive queue data
7546 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7547 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7548 fpeq->EQ_processed++;
7551 /* Always clear and re-arm the EQ */
7553 sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7555 return mbox_pending;
7560 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7561 * @phba: Pointer to HBA context object.
7563 * This function is called from worker thread when a mailbox command times out.
7564 * The caller is not required to hold any locks. This function will reset the
7565 * HBA and recover all the pending commands.
7568 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7570 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7571 MAILBOX_t *mb = NULL;
7573 struct lpfc_sli *psli = &phba->sli;
7575 /* If the mailbox completed, process the completion and return */
7576 if (lpfc_sli4_process_missed_mbox_completions(phba))
7581 /* Check the pmbox pointer first. There is a race condition
7582 * between the mbox timeout handler getting executed in the
7583 * worklist and the mailbox actually completing. When this
7584 * race condition occurs, the mbox_active will be NULL.
7586 spin_lock_irq(&phba->hbalock);
7587 if (pmbox == NULL) {
7588 lpfc_printf_log(phba, KERN_WARNING,
7590 "0353 Active Mailbox cleared - mailbox timeout "
7592 spin_unlock_irq(&phba->hbalock);
7596 /* Mbox cmd <mbxCommand> timeout */
7597 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7598 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7600 phba->pport->port_state,
7602 phba->sli.mbox_active);
7603 spin_unlock_irq(&phba->hbalock);
7605 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7606 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7607 * it to fail all outstanding SCSI IO.
7609 spin_lock_irq(&phba->pport->work_port_lock);
7610 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7611 spin_unlock_irq(&phba->pport->work_port_lock);
7612 spin_lock_irq(&phba->hbalock);
7613 phba->link_state = LPFC_LINK_UNKNOWN;
7614 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7615 spin_unlock_irq(&phba->hbalock);
7617 lpfc_sli_abort_fcp_rings(phba);
7619 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7620 "0345 Resetting board due to mailbox timeout\n");
7622 /* Reset the HBA device */
7623 lpfc_reset_hba(phba);
7627 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7628 * @phba: Pointer to HBA context object.
7629 * @pmbox: Pointer to mailbox object.
7630 * @flag: Flag indicating how the mailbox need to be processed.
7632 * This function is called by discovery code and HBA management code
7633 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7634 * function gets the hbalock to protect the data structures.
7635 * The mailbox command can be submitted in polling mode, in which case
7636 * this function will wait in a polling loop for the completion of the
7638 * If the mailbox is submitted in no_wait mode (not polling) the
7639 * function will submit the command and return immediately without waiting
7640 * for the mailbox completion. The no_wait mode is supported only when the
7641 * HBA is in SLI2/SLI3 mode and interrupts are enabled.
7642 * The SLI interface allows only one mailbox pending at a time. If the
7643 * mailbox is issued in polling mode and there is already a mailbox
7644 * pending, then the function will return an error. If the mailbox is issued
7645 * in NO_WAIT mode and there is a mailbox pending already, the function
7646 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7647 * The sli layer owns the mailbox object until the completion of the mailbox
7648 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
7649 * return codes the caller owns the mailbox command after the return of
7653 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7657 struct lpfc_sli *psli = &phba->sli;
7658 uint32_t status, evtctr;
7659 uint32_t ha_copy, hc_copy;
7661 unsigned long timeout;
7662 unsigned long drvr_flag = 0;
7663 uint32_t word0, ldata;
7664 void __iomem *to_slim;
7665 int processing_queue = 0;
7667 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7669 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7670 /* processing mbox queue from intr_handler */
7671 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7672 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7675 processing_queue = 1;
7676 pmbox = lpfc_mbox_get(phba);
7678 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7683 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7684 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7686 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7687 lpfc_printf_log(phba, KERN_ERR,
7688 LOG_MBOX | LOG_VPORT,
7689 "1806 Mbox x%x failed. No vport\n",
7690 pmbox->u.mb.mbxCommand);
7692 goto out_not_finished;
7696 /* If the PCI channel is in offline state, do not post mbox. */
7697 if (unlikely(pci_channel_offline(phba->pcidev))) {
7698 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7699 goto out_not_finished;
7702 /* If HBA has a deferred error attention, fail the iocb. */
7703 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7704 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7705 goto out_not_finished;
7711 status = MBX_SUCCESS;
7713 if (phba->link_state == LPFC_HBA_ERROR) {
7714 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7716 /* Mbox command <mbxCommand> cannot issue */
7717 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7718 "(%d):0311 Mailbox command x%x cannot "
7719 "issue Data: x%x x%x\n",
7720 pmbox->vport ? pmbox->vport->vpi : 0,
7721 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7722 goto out_not_finished;
7725 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7726 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7727 !(hc_copy & HC_MBINT_ENA)) {
7728 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7729 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7730 "(%d):2528 Mailbox command x%x cannot "
7731 "issue Data: x%x x%x\n",
7732 pmbox->vport ? pmbox->vport->vpi : 0,
7733 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7734 goto out_not_finished;
7738 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7739 /* Polling for a mbox command when another one is already active
7740 * is not allowed in SLI. Also, the driver must have established
7741 * SLI2 mode to queue and process multiple mbox commands.
7744 if (flag & MBX_POLL) {
7745 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7747 /* Mbox command <mbxCommand> cannot issue */
7748 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7749 "(%d):2529 Mailbox command x%x "
7750 "cannot issue Data: x%x x%x\n",
7751 pmbox->vport ? pmbox->vport->vpi : 0,
7752 pmbox->u.mb.mbxCommand,
7753 psli->sli_flag, flag);
7754 goto out_not_finished;
7757 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7758 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7759 /* Mbox command <mbxCommand> cannot issue */
7760 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7761 "(%d):2530 Mailbox command x%x "
7762 "cannot issue Data: x%x x%x\n",
7763 pmbox->vport ? pmbox->vport->vpi : 0,
7764 pmbox->u.mb.mbxCommand,
7765 psli->sli_flag, flag);
7766 goto out_not_finished;
7769 /* Another mailbox command is still being processed, queue this
7770 * command to be processed later.
7772 lpfc_mbox_put(phba, pmbox);
7774 /* Mbox cmd issue - BUSY */
7775 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7776 "(%d):0308 Mbox cmd issue - BUSY Data: "
7777 "x%x x%x x%x x%x\n",
7778 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7780 phba->pport ? phba->pport->port_state : 0xff,
7781 psli->sli_flag, flag);
7783 psli->slistat.mbox_busy++;
7784 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7787 lpfc_debugfs_disc_trc(pmbox->vport,
7788 LPFC_DISC_TRC_MBOX_VPORT,
7789 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7790 (uint32_t)mbx->mbxCommand,
7791 mbx->un.varWords[0], mbx->un.varWords[1]);
7794 lpfc_debugfs_disc_trc(phba->pport,
7796 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7797 (uint32_t)mbx->mbxCommand,
7798 mbx->un.varWords[0], mbx->un.varWords[1]);
7804 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7806 /* If we are not polling, we MUST be in SLI2 mode */
7807 if (flag != MBX_POLL) {
7808 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7809 (mbx->mbxCommand != MBX_KILL_BOARD)) {
7810 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7811 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7812 /* Mbox command <mbxCommand> cannot issue */
7813 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7814 "(%d):2531 Mailbox command x%x "
7815 "cannot issue Data: x%x x%x\n",
7816 pmbox->vport ? pmbox->vport->vpi : 0,
7817 pmbox->u.mb.mbxCommand,
7818 psli->sli_flag, flag);
7819 goto out_not_finished;
7821 /* timeout active mbox command */
7822 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7824 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7827 /* Mailbox cmd <cmd> issue */
7828 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7829 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7831 pmbox->vport ? pmbox->vport->vpi : 0,
7833 phba->pport ? phba->pport->port_state : 0xff,
7834 psli->sli_flag, flag);
7836 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7838 lpfc_debugfs_disc_trc(pmbox->vport,
7839 LPFC_DISC_TRC_MBOX_VPORT,
7840 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7841 (uint32_t)mbx->mbxCommand,
7842 mbx->un.varWords[0], mbx->un.varWords[1]);
7845 lpfc_debugfs_disc_trc(phba->pport,
7847 "MBOX Send: cmd:x%x mb:x%x x%x",
7848 (uint32_t)mbx->mbxCommand,
7849 mbx->un.varWords[0], mbx->un.varWords[1]);
7853 psli->slistat.mbox_cmd++;
7854 evtctr = psli->slistat.mbox_event;
7856 /* next set own bit for the adapter and copy over command word */
7857 mbx->mbxOwner = OWN_CHIP;
7859 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7860 /* Populate mbox extension offset word. */
7861 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7862 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7863 = (uint8_t *)phba->mbox_ext
7864 - (uint8_t *)phba->mbox;
7867 /* Copy the mailbox extension data */
7868 if (pmbox->in_ext_byte_len && pmbox->context2) {
7869 lpfc_sli_pcimem_bcopy(pmbox->context2,
7870 (uint8_t *)phba->mbox_ext,
7871 pmbox->in_ext_byte_len);
7873 /* Copy command data to host SLIM area */
7874 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7876 /* Populate mbox extension offset word. */
7877 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7878 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7879 = MAILBOX_HBA_EXT_OFFSET;
7881 /* Copy the mailbox extension data */
7882 if (pmbox->in_ext_byte_len && pmbox->context2)
7883 lpfc_memcpy_to_slim(phba->MBslimaddr +
7884 MAILBOX_HBA_EXT_OFFSET,
7885 pmbox->context2, pmbox->in_ext_byte_len);
7887 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7888 /* copy command data into host mbox for cmpl */
7889 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7892 /* First copy mbox command data to HBA SLIM, skip past first
7894 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7895 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7896 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7898 /* Next copy over first word, with mbxOwner set */
7899 ldata = *((uint32_t *)mbx);
7900 to_slim = phba->MBslimaddr;
7901 writel(ldata, to_slim);
7902 readl(to_slim); /* flush */
7904 if (mbx->mbxCommand == MBX_CONFIG_PORT)
7905 /* switch over to host mailbox */
7906 psli->sli_flag |= LPFC_SLI_ACTIVE;
7913 /* Set up reference to mailbox command */
7914 psli->mbox_active = pmbox;
7915 /* Interrupt board to do it */
7916 writel(CA_MBATT, phba->CAregaddr);
7917 readl(phba->CAregaddr); /* flush */
7918 /* Don't wait for it to finish, just return */
7922 /* Set up null reference to mailbox command */
7923 psli->mbox_active = NULL;
7924 /* Interrupt board to do it */
7925 writel(CA_MBATT, phba->CAregaddr);
7926 readl(phba->CAregaddr); /* flush */
7928 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7929 /* First read mbox status word */
7930 word0 = *((uint32_t *)phba->mbox);
7931 word0 = le32_to_cpu(word0);
7933 /* First read mbox status word */
7934 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7935 spin_unlock_irqrestore(&phba->hbalock,
7937 goto out_not_finished;
7941 /* Read the HBA Host Attention Register */
7942 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7943 spin_unlock_irqrestore(&phba->hbalock,
7945 goto out_not_finished;
7947 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7950 /* Wait for command to complete */
7951 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7952 (!(ha_copy & HA_MBATT) &&
7953 (phba->link_state > LPFC_WARM_START))) {
7954 if (time_after(jiffies, timeout)) {
7955 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7956 spin_unlock_irqrestore(&phba->hbalock,
7958 goto out_not_finished;
7961 /* Check if we took a mbox interrupt while we were
7963 if (((word0 & OWN_CHIP) != OWN_CHIP)
7964 && (evtctr != psli->slistat.mbox_event))
7968 spin_unlock_irqrestore(&phba->hbalock,
7971 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7974 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7975 /* First copy command data */
7976 word0 = *((uint32_t *)phba->mbox);
7977 word0 = le32_to_cpu(word0);
7978 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7981 /* Check real SLIM for any errors */
7982 slimword0 = readl(phba->MBslimaddr);
7983 slimmb = (MAILBOX_t *) & slimword0;
7984 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7985 && slimmb->mbxStatus) {
7992 /* First copy command data */
7993 word0 = readl(phba->MBslimaddr);
7995 /* Read the HBA Host Attention Register */
7996 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7997 spin_unlock_irqrestore(&phba->hbalock,
7999 goto out_not_finished;
8003 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8004 /* copy results back to user */
8005 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8007 /* Copy the mailbox extension data */
8008 if (pmbox->out_ext_byte_len && pmbox->context2) {
8009 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8011 pmbox->out_ext_byte_len);
8014 /* First copy command data */
8015 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8017 /* Copy the mailbox extension data */
8018 if (pmbox->out_ext_byte_len && pmbox->context2) {
8019 lpfc_memcpy_from_slim(pmbox->context2,
8021 MAILBOX_HBA_EXT_OFFSET,
8022 pmbox->out_ext_byte_len);
8026 writel(HA_MBATT, phba->HAregaddr);
8027 readl(phba->HAregaddr); /* flush */
8029 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8030 status = mbx->mbxStatus;
8033 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8037 if (processing_queue) {
8038 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8039 lpfc_mbox_cmpl_put(phba, pmbox);
8041 return MBX_NOT_FINISHED;
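/*
 * Ownership sketch for the rules documented above (hedged example): on
 * MBX_BUSY or MBX_SUCCESS the SLI layer keeps the mailbox until it
 * completes; any other return code hands it back to the caller:
 *
 *	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(pmbox, phba->mbox_mem_pool);
 */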
8045 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8046 * @phba: Pointer to HBA context object.
8048 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8049 * the driver internal pending mailbox queue. It will then try to wait out the
8050 * possible outstanding mailbox command before returning.
8053 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8054 * the outstanding mailbox command timed out.
8057 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8059 struct lpfc_sli *psli = &phba->sli;
8061 unsigned long timeout = 0;
8063 /* Mark the asynchronous mailbox command posting as blocked */
8064 spin_lock_irq(&phba->hbalock);
8065 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8066 /* Determine how long we might wait for the active mailbox
8067 * command to be gracefully completed by firmware.
8069 if (phba->sli.mbox_active)
8070 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8071 phba->sli.mbox_active) *
8073 spin_unlock_irq(&phba->hbalock);
8075 /* Make sure the mailbox is really active */
8077 lpfc_sli4_process_missed_mbox_completions(phba);
8079 /* Wait for the outstanding mailbox command to complete */
8080 while (phba->sli.mbox_active) {
8081 /* Check active mailbox complete status every 2ms */
8082 msleep(2);
8083 if (time_after(jiffies, timeout)) {
8084 /* Timeout: mark the outstanding cmd not complete */
8085 rc = 1;
8086 break;
8087 }
8088 }
8090 /* Cannot cleanly block the async mailbox command; fail it */
8091 if (rc) {
8092 spin_lock_irq(&phba->hbalock);
8093 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8094 spin_unlock_irq(&phba->hbalock);
8095 }
8097 return rc;
8098 }
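/*
 * Clarifying sketch (not part of the driver logic): how block/unblock
 * pair up around a synchronous post, mirroring the MBX_POLL path in
 * lpfc_sli_issue_mbox_s4() below:
 *
 *	if (lpfc_sli4_async_mbox_block(phba) == 0) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */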
8100 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
8101 * @phba: Pointer to HBA context object.
8103 * The function unblocks and resumes the posting of SLI4 asynchronous mailbox
8104 * commands from the driver internal pending mailbox queue. It makes sure
8105 * that there is no outstanding mailbox command before resuming posting
8106 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8107 * mailbox command, it will try to wait it out before resuming asynchronous
8108 * mailbox command posting.
8111 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8113 struct lpfc_sli *psli = &phba->sli;
8115 spin_lock_irq(&phba->hbalock);
8116 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8117 /* Asynchronous mailbox posting is not blocked, do nothing */
8118 spin_unlock_irq(&phba->hbalock);
8119 return;
8120 }
8122 /* The outstanding synchronous mailbox command is guaranteed to be done,
8123 * either successfully or by timeout; a timed-out mailbox command is
8124 * always removed, so just unblock posting of async mailbox commands
8125 * and resume.
8126 */
8127 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8128 spin_unlock_irq(&phba->hbalock);
8130 /* wake up worker thread to post asynchronous mailbox command */
8131 lpfc_worker_wake_up(phba);
8132 }
8135 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8136 * @phba: Pointer to HBA context object.
8137 * @mboxq: Pointer to mailbox object.
8139 * The function waits for the bootstrap mailbox register ready bit from
8140 * the port for twice the regular mailbox command timeout value.
8142 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8143 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8146 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8149 unsigned long timeout;
8150 struct lpfc_register bmbx_reg;
8152 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8153 * 1000) + jiffies;
8155 do {
8156 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8157 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8158 if (!db_ready)
8159 msleep(2);
8161 if (time_after(jiffies, timeout))
8162 return MBXERR_ERROR;
8163 } while (!db_ready);
8165 return 0;
8166 }
8169 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8170 * @phba: Pointer to HBA context object.
8171 * @mboxq: Pointer to mailbox object.
8173 * The function posts a mailbox to the port. The mailbox is expected
8174 * to be completely filled in and ready for the port to operate on it.
8175 * This routine executes a synchronous completion operation on the
8176 * mailbox by polling for its completion.
8178 * The caller must not be holding any locks when calling this routine.
8181 * MBX_SUCCESS - mailbox posted successfully
8182 * Any of the MBX error values.
8185 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8187 int rc = MBX_SUCCESS;
8188 unsigned long iflag;
8189 uint32_t mcqe_status;
8190 uint32_t mbx_cmnd;
8191 struct lpfc_sli *psli = &phba->sli;
8192 struct lpfc_mqe *mb = &mboxq->u.mqe;
8193 struct lpfc_bmbx_create *mbox_rgn;
8194 struct dma_address *dma_address;
8196 /*
8197 * Only one mailbox can be active to the bootstrap mailbox region
8198 * at a time and there is no queueing provided.
8199 */
8200 spin_lock_irqsave(&phba->hbalock, iflag);
8201 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8202 spin_unlock_irqrestore(&phba->hbalock, iflag);
8203 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8204 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8205 "cannot issue Data: x%x x%x\n",
8206 mboxq->vport ? mboxq->vport->vpi : 0,
8207 mboxq->u.mb.mbxCommand,
8208 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8209 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8210 psli->sli_flag, MBX_POLL);
8211 return MBXERR_ERROR;
8213 /* The server grabs the token and owns it until release */
8214 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8215 phba->sli.mbox_active = mboxq;
8216 spin_unlock_irqrestore(&phba->hbalock, iflag);
8218 /* wait for bootstrap mbox register readiness */
8219 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8220 if (rc)
8221 goto exit;
8223 /*
8224 * Initialize the bootstrap memory region to avoid stale data areas
8225 * in the mailbox post. Then copy the caller's mailbox contents to
8226 * the bmbx mailbox region.
8227 */
8228 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8229 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8230 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8231 sizeof(struct lpfc_mqe));
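/* Note (added for clarity): the 64-bit bootstrap mailbox address is
 * handed to the port in two halves, high word first, and each write is
 * confirmed via the ready bit before the next step, hence the paired
 * waits below.
 */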
8233 /* Post the high mailbox dma address to the port and wait for ready. */
8234 dma_address = &phba->sli4_hba.bmbx.dma_address;
8235 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8237 /* wait for bootstrap mbox register for hi-address write done */
8238 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8239 if (rc)
8240 goto exit;
8242 /* Post the low mailbox dma address to the port. */
8243 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8245 /* wait for bootstrap mbox register for low address write done */
8246 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8247 if (rc)
8248 goto exit;
8250 /*
8251 * Read the CQ to ensure the mailbox has completed.
8252 * If so, update the mailbox status so that the upper layers
8253 * can complete the request normally.
8254 */
8255 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8256 sizeof(struct lpfc_mqe));
8257 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8258 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8259 sizeof(struct lpfc_mcqe));
8260 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8261 /*
8262 * When the CQE status indicates a failure and the mailbox status
8263 * indicates success then copy the CQE status into the mailbox status
8264 * (and prefix it with x4000).
8265 */
8266 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8267 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8268 bf_set(lpfc_mqe_status, mb,
8269 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8270 rc = MBXERR_ERROR;
8271 } else
8272 lpfc_sli4_swap_str(phba, mboxq);
8274 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8275 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8276 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8277 " x%x x%x CQ: x%x x%x x%x x%x\n",
8278 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8279 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8280 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8281 bf_get(lpfc_mqe_status, mb),
8282 mb->un.mb_words[0], mb->un.mb_words[1],
8283 mb->un.mb_words[2], mb->un.mb_words[3],
8284 mb->un.mb_words[4], mb->un.mb_words[5],
8285 mb->un.mb_words[6], mb->un.mb_words[7],
8286 mb->un.mb_words[8], mb->un.mb_words[9],
8287 mb->un.mb_words[10], mb->un.mb_words[11],
8288 mb->un.mb_words[12], mboxq->mcqe.word0,
8289 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8290 mboxq->mcqe.trailer);
8291 exit:
8292 /* We are holding the token, no lock needed for the release */
8293 spin_lock_irqsave(&phba->hbalock, iflag);
8294 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8295 phba->sli.mbox_active = NULL;
8296 spin_unlock_irqrestore(&phba->hbalock, iflag);
8297 return rc;
8298 }
8301 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8302 * @phba: Pointer to HBA context object.
8303 * @pmbox: Pointer to mailbox object.
8304 * @flag: Flag indicating how the mailbox needs to be processed.
8306 * This function is called by discovery code and HBA management code to submit
8307 * a mailbox command to firmware with SLI-4 interface spec.
8309 * Return codes: the caller owns the mailbox command after the return of
8310 * the function.
8313 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8314 uint32_t flag)
8315 {
8316 struct lpfc_sli *psli = &phba->sli;
8317 unsigned long iflags;
8318 int rc;
8320 /* dump from issue mailbox command if setup */
8321 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8323 rc = lpfc_mbox_dev_check(phba);
8324 if (unlikely(rc)) {
8325 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8326 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8327 "cannot issue Data: x%x x%x\n",
8328 mboxq->vport ? mboxq->vport->vpi : 0,
8329 mboxq->u.mb.mbxCommand,
8330 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8331 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8332 psli->sli_flag, flag);
8333 goto out_not_finished;
8334 }
8336 /* Detect polling mode and jump to a handler */
8337 if (!phba->sli4_hba.intr_enable) {
8338 if (flag == MBX_POLL)
8339 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8340 else
8341 rc = -EIO;
8342 if (rc != MBX_SUCCESS)
8343 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8344 "(%d):2541 Mailbox command x%x "
8345 "(x%x/x%x) failure: "
8346 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8348 mboxq->vport ? mboxq->vport->vpi : 0,
8349 mboxq->u.mb.mbxCommand,
8350 lpfc_sli_config_mbox_subsys_get(phba,
8352 lpfc_sli_config_mbox_opcode_get(phba,
8354 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8355 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8356 bf_get(lpfc_mcqe_ext_status,
8358 psli->sli_flag, flag);
8359 return rc;
8360 } else if (flag == MBX_POLL) {
8361 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8362 "(%d):2542 Try to issue mailbox command "
8363 "x%x (x%x/x%x) synchronously ahead of async "
8364 "mailbox command queue: x%x x%x\n",
8365 mboxq->vport ? mboxq->vport->vpi : 0,
8366 mboxq->u.mb.mbxCommand,
8367 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8368 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8369 psli->sli_flag, flag);
8370 /* Try to block the asynchronous mailbox posting */
8371 rc = lpfc_sli4_async_mbox_block(phba);
8372 if (!rc) {
8373 /* Successfully blocked, now issue sync mbox cmd */
8374 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8375 if (rc != MBX_SUCCESS)
8376 lpfc_printf_log(phba, KERN_WARNING,
8378 "(%d):2597 Sync Mailbox command "
8379 "x%x (x%x/x%x) failure: "
8380 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8382 mboxq->vport ? mboxq->vport->vpi : 0,
8383 mboxq->u.mb.mbxCommand,
8384 lpfc_sli_config_mbox_subsys_get(phba,
8386 lpfc_sli_config_mbox_opcode_get(phba,
8388 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8389 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8390 bf_get(lpfc_mcqe_ext_status,
8392 psli->sli_flag, flag);
8393 /* Unblock the async mailbox posting afterward */
8394 lpfc_sli4_async_mbox_unblock(phba);
8395 }
8396 return rc;
8397 }
8399 /* Now, interrupt mode asynchronous mailbox command */
8400 rc = lpfc_mbox_cmd_check(phba, mboxq);
8401 if (rc) {
8402 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8403 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8404 "cannot issue Data: x%x x%x\n",
8405 mboxq->vport ? mboxq->vport->vpi : 0,
8406 mboxq->u.mb.mbxCommand,
8407 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8408 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8409 psli->sli_flag, flag);
8410 goto out_not_finished;
8411 }
8413 /* Put the mailbox command to the driver internal FIFO */
8414 psli->slistat.mbox_busy++;
8415 spin_lock_irqsave(&phba->hbalock, iflags);
8416 lpfc_mbox_put(phba, mboxq);
8417 spin_unlock_irqrestore(&phba->hbalock, iflags);
8418 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8419 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8420 "x%x (x%x/x%x) x%x x%x x%x\n",
8421 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8422 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8423 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8424 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8425 phba->pport->port_state,
8426 psli->sli_flag, MBX_NOWAIT);
8427 /* Wake up worker thread to transport mailbox command from head */
8428 lpfc_worker_wake_up(phba);
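/* (The worker thread then drains the FIFO via
 * lpfc_sli4_post_async_mbox() below.)
 */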
8430 return MBX_BUSY;
8432 out_not_finished:
8433 return MBX_NOT_FINISHED;
8434 }
8437 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8438 * @phba: Pointer to HBA context object.
8440 * This function is called by the worker thread to send a mailbox command to
8441 * the SLI4 HBA firmware.
8445 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8447 struct lpfc_sli *psli = &phba->sli;
8448 LPFC_MBOXQ_t *mboxq;
8449 int rc = MBX_SUCCESS;
8450 unsigned long iflags;
8451 struct lpfc_mqe *mqe;
8452 uint32_t mbx_cmnd;
8454 /* Check interrupt mode before post async mailbox command */
8455 if (unlikely(!phba->sli4_hba.intr_enable))
8456 return MBX_NOT_FINISHED;
8458 /* Check for mailbox command service token */
8459 spin_lock_irqsave(&phba->hbalock, iflags);
8460 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8461 spin_unlock_irqrestore(&phba->hbalock, iflags);
8462 return MBX_NOT_FINISHED;
8463 }
8464 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8465 spin_unlock_irqrestore(&phba->hbalock, iflags);
8466 return MBX_NOT_FINISHED;
8467 }
8468 if (unlikely(phba->sli.mbox_active)) {
8469 spin_unlock_irqrestore(&phba->hbalock, iflags);
8470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8471 "0384 There is pending active mailbox cmd\n");
8472 return MBX_NOT_FINISHED;
8473 }
8474 /* Take the mailbox command service token */
8475 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8477 /* Get the next mailbox command from head of queue */
8478 mboxq = lpfc_mbox_get(phba);
8480 /* If no more mailbox commands waiting for post, we're done */
8481 if (!mboxq) {
8482 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8483 spin_unlock_irqrestore(&phba->hbalock, iflags);
8484 return MBX_SUCCESS;
8485 }
8486 phba->sli.mbox_active = mboxq;
8487 spin_unlock_irqrestore(&phba->hbalock, iflags);
8489 /* Check device readiness for posting mailbox command */
8490 rc = lpfc_mbox_dev_check(phba);
8491 if (unlikely(rc)) {
8492 /* Driver clean routine will clean up pending mailbox */
8493 goto out_not_finished;
8494 }
8495 /* Prepare the mbox command to be posted */
8496 mqe = &mboxq->u.mqe;
8497 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8499 /* Start timer for the mbox_tmo and log some mailbox post messages */
8500 mod_timer(&psli->mbox_tmo, (jiffies +
8501 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8503 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8504 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8506 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8507 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8508 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8509 phba->pport->port_state, psli->sli_flag);
8511 if (mbx_cmnd != MBX_HEARTBEAT) {
8512 if (mboxq->vport) {
8513 lpfc_debugfs_disc_trc(mboxq->vport,
8514 LPFC_DISC_TRC_MBOX_VPORT,
8515 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8516 mbx_cmnd, mqe->un.mb_words[0],
8517 mqe->un.mb_words[1]);
8518 } else {
8519 lpfc_debugfs_disc_trc(phba->pport,
8520 LPFC_DISC_TRC_MBOX,
8521 "MBOX Send: cmd:x%x mb:x%x x%x",
8522 mbx_cmnd, mqe->un.mb_words[0],
8523 mqe->un.mb_words[1]);
8524 }
8525 }
8526 psli->slistat.mbox_cmd++;
8528 /* Post the mailbox command to the port */
8529 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8530 if (rc != MBX_SUCCESS) {
8531 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8532 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8533 "cannot issue Data: x%x x%x\n",
8534 mboxq->vport ? mboxq->vport->vpi : 0,
8535 mboxq->u.mb.mbxCommand,
8536 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8537 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8538 psli->sli_flag, MBX_NOWAIT);
8539 goto out_not_finished;
8540 }
8541 return MBX_SUCCESS;
8543 out_not_finished:
8545 spin_lock_irqsave(&phba->hbalock, iflags);
8546 if (phba->sli.mbox_active) {
8547 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8548 __lpfc_mbox_cmpl_put(phba, mboxq);
8549 /* Release the token */
8550 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8551 phba->sli.mbox_active = NULL;
8552 }
8553 spin_unlock_irqrestore(&phba->hbalock, iflags);
8555 return MBX_NOT_FINISHED;
8556 }
8559 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8560 * @phba: Pointer to HBA context object.
8561 * @pmbox: Pointer to mailbox object.
8562 * @flag: Flag indicating how the mailbox needs to be processed.
8564 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8565 * the API jump table function pointer from the lpfc_hba struct.
8567 * Return codes: the caller owns the mailbox command after the return of
8568 * the function.
8571 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8573 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8577 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8578 * @phba: The hba struct for which this call is being executed.
8579 * @dev_grp: The HBA PCI-Device group number.
8581 * This routine sets up the mbox interface API function jump table in @phba
8582 * struct.
8583 * Returns: 0 - success, -ENODEV - failure.
8586 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8587 {
8589 switch (dev_grp) {
8590 case LPFC_PCI_DEV_LP:
8591 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8592 phba->lpfc_sli_handle_slow_ring_event =
8593 lpfc_sli_handle_slow_ring_event_s3;
8594 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8595 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8596 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8597 break;
8598 case LPFC_PCI_DEV_OC:
8599 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8600 phba->lpfc_sli_handle_slow_ring_event =
8601 lpfc_sli_handle_slow_ring_event_s4;
8602 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8603 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8604 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8605 break;
8606 default:
8607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8608 "1420 Invalid HBA PCI-device group: 0x%x\n",
8617 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8618 * @phba: Pointer to HBA context object.
8619 * @pring: Pointer to driver SLI ring object.
8620 * @piocb: Pointer to address of newly added command iocb.
8622 * This function is called with hbalock held to add a command
8623 * iocb to the txq when the SLI layer cannot submit the command iocb
8624 * to the ring.
8625 */
8626 static void
8627 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8628 struct lpfc_iocbq *piocb)
8630 lockdep_assert_held(&phba->hbalock);
8631 /* Insert the caller's iocb in the txq tail for later processing. */
8632 list_add_tail(&piocb->list, &pring->txq);
8633 }
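/* (iocbs parked in the txq here are drained again in FIFO order, e.g.
 * via lpfc_sli_next_iocb() below, once the ring has room.)
 */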
8636 * lpfc_sli_next_iocb - Get the next iocb in the txq
8637 * @phba: Pointer to HBA context object.
8638 * @pring: Pointer to driver SLI ring object.
8639 * @piocb: Pointer to address of newly added command iocb.
8641 * This function is called with hbalock held before a new
8642 * iocb is submitted to the firmware. It flushes any iocbs
8643 * pending in the txq to the firmware before submitting
8644 * new iocbs.
8645 * If there are iocbs in the txq which need to be submitted
8646 * to firmware, lpfc_sli_next_iocb returns the first element
8647 * of the txq after dequeuing it from txq.
8648 * If there is no iocb in the txq then the function returns
8649 * *piocb and sets *piocb to NULL. The caller checks *piocb to
8650 * see whether there are more commands in the txq.
8652 static struct lpfc_iocbq *
8653 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8654 struct lpfc_iocbq **piocb)
8656 struct lpfc_iocbq *nextiocb;
8658 lockdep_assert_held(&phba->hbalock);
8660 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8661 if (!nextiocb) {
8662 nextiocb = *piocb;
8663 *piocb = NULL;
8664 }
8666 return nextiocb;
8667 }
8670 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
8671 * @phba: Pointer to HBA context object.
8672 * @ring_number: SLI ring number to issue iocb on.
8673 * @piocb: Pointer to command iocb.
8674 * @flag: Flag indicating if this command can be put into txq.
8676 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8677 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8678 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8679 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8680 * this function allows only iocbs for posting buffers. This function finds
8681 * next available slot in the command ring and posts the command to the
8682 * available slot and writes the port attention register to request HBA start
8683 * processing new iocb. If there is no slot available in the ring and
8684 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8685 * the function returns IOCB_BUSY.
8687 * This function is called with hbalock held. The function will return success
8688 * after it successfully submits the iocb to firmware or after adding it to
8689 * the txq.
8692 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8693 struct lpfc_iocbq *piocb, uint32_t flag)
8694 {
8695 struct lpfc_iocbq *nextiocb;
8696 IOCB_t *iocb;
8697 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
8699 lockdep_assert_held(&phba->hbalock);
8701 if (piocb->iocb_cmpl && (!piocb->vport) &&
8702 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8703 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8704 lpfc_printf_log(phba, KERN_ERR,
8705 LOG_SLI | LOG_VPORT,
8706 "1807 IOCB x%x failed. No vport\n",
8707 piocb->iocb.ulpCommand);
8708 return IOCB_ERROR;
8709 }
8713 /* If the PCI channel is in offline state, do not post iocbs. */
8714 if (unlikely(pci_channel_offline(phba->pcidev)))
8715 return IOCB_ERROR;
8717 /* If HBA has a deferred error attention, fail the iocb. */
8718 if (unlikely(phba->hba_flag & DEFER_ERATT))
8719 return IOCB_ERROR;
8721 /*
8722 * We should never get an IOCB if we are in a < LINK_DOWN state
8723 */
8724 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8725 return IOCB_ERROR;
8727 /*
8728 * Check to see if we are blocking IOCB processing because of an
8729 * outstanding event.
8730 */
8731 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8732 goto iocb_busy;
8734 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8735 /*
8736 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8737 * can be issued if the link is not up.
8738 */
8739 switch (piocb->iocb.ulpCommand) {
8740 case CMD_GEN_REQUEST64_CR:
8741 case CMD_GEN_REQUEST64_CX:
8742 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8743 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8744 FC_RCTL_DD_UNSOL_CMD) ||
8745 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8746 MENLO_TRANSPORT_TYPE))
8747 goto iocb_busy;
8748 break;
8750 case CMD_QUE_RING_BUF_CN:
8751 case CMD_QUE_RING_BUF64_CN:
8752 /*
8753 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8754 * completion, iocb_cmpl MUST be 0.
8755 */
8756 if (piocb->iocb_cmpl)
8757 piocb->iocb_cmpl = NULL;
8758 /*FALLTHROUGH*/
8759 case CMD_CREATE_XRI_CR:
8760 case CMD_CLOSE_XRI_CN:
8761 case CMD_CLOSE_XRI_CX:
8762 break;
8763 default:
8764 goto iocb_busy;
8765 }
8767 /*
8768 * For FCP commands, we must be in a state where we can process link
8769 * attention events.
8770 */
8771 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
8772 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8773 goto iocb_busy;
8774 }
8776 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8777 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8778 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8780 if (iocb)
8781 lpfc_sli_update_ring(phba, pring);
8782 else
8783 lpfc_sli_update_full_ring(phba, pring);
8785 if (!piocb)
8786 return IOCB_SUCCESS;
8788 goto out_busy;
8790 iocb_busy:
8791 pring->stats.iocb_cmd_delay++;
8793 out_busy:
8795 if (!(flag & SLI_IOCB_RET_IOCB)) {
8796 __lpfc_sli_ringtx_put(phba, pring, piocb);
8797 return IOCB_SUCCESS;
8798 }
8800 return IOCB_BUSY;
8801 }
8804 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8805 * @phba: Pointer to HBA context object.
8806 * @piocb: Pointer to command iocb.
8807 * @sglq: Pointer to the scatter gather queue object.
8809 * This routine converts the bpl or bde that is in the IOCB
8810 * to a sgl list for the sli4 hardware. The physical address
8811 * of the bpl/bde is converted back to a virtual address.
8812 * If the IOCB contains a BPL then the list of BDE's is
8813 * converted to sli4_sge's. If the IOCB contains a single
8814 * BDE then it is converted to a single sli_sge.
8815 * The IOCB is still in cpu endianness so the contents of
8816 * the bpl can be used without byte swapping.
8818 * Returns valid XRI = Success, NO_XRI = Failure.
8821 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8822 struct lpfc_sglq *sglq)
8824 uint16_t xritag = NO_XRI;
8825 struct ulp_bde64 *bpl = NULL;
8826 struct ulp_bde64 bde;
8827 struct sli4_sge *sgl = NULL;
8828 struct lpfc_dmabuf *dmabuf;
8829 IOCB_t *icmd;
8830 int numBdes = 0;
8831 int i = 0;
8832 uint32_t offset = 0; /* accumulated offset in the sg request list */
8833 int inbound = 0; /* number of sg reply entries inbound from firmware */
8835 if (!piocbq || !sglq)
8836 return xritag;
8838 sgl = (struct sli4_sge *)sglq->sgl;
8839 icmd = &piocbq->iocb;
8840 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8841 return sglq->sli4_xritag;
8842 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8843 numBdes = icmd->un.genreq64.bdl.bdeSize /
8844 sizeof(struct ulp_bde64);
8845 /* The addrHigh and addrLow fields within the IOCB
8846 * have not been byteswapped yet so there is no
8847 * need to swap them back.
8848 */
8849 if (piocbq->context3)
8850 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8851 else
8852 return xritag;
8854 bpl = (struct ulp_bde64 *)dmabuf->virt;
8855 if (!bpl)
8856 return xritag;
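/* (Added note: the loop below walks the BPL, converting each BDE to an
 * SGE; the final SGE is flagged as "last" so the port knows where the
 * list ends.)
 */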
8858 for (i = 0; i < numBdes; i++) {
8859 /* Should already be byte swapped. */
8860 sgl->addr_hi = bpl->addrHigh;
8861 sgl->addr_lo = bpl->addrLow;
8863 sgl->word2 = le32_to_cpu(sgl->word2);
8864 if ((i+1) == numBdes)
8865 bf_set(lpfc_sli4_sge_last, sgl, 1);
8867 bf_set(lpfc_sli4_sge_last, sgl, 0);
8868 /* swap the size field back to the cpu so we
8869 * can assign it to the sgl.
8871 bde.tus.w = le32_to_cpu(bpl->tus.w);
8872 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8873 /* The offsets in the sgl need to be accumulated
8874 * separately for the request and reply lists.
8875 * The request is always first, the reply follows.
8877 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8878 /* add up the reply sg entries */
8879 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8880 inbound++;
8881 /* first inbound? reset the offset */
8882 if (inbound == 1)
8883 offset = 0;
8884 bf_set(lpfc_sli4_sge_offset, sgl, offset);
8885 bf_set(lpfc_sli4_sge_type, sgl,
8886 LPFC_SGE_TYPE_DATA);
8887 offset += bde.tus.f.bdeSize;
8889 sgl->word2 = cpu_to_le32(sgl->word2);
8890 bpl++;
8891 sgl++;
8892 }
8893 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8894 /* The addrHigh and addrLow fields of the BDE have not
8895 * been byteswapped yet so they need to be swapped
8896 * before putting them in the sgl.
8897 */
8898 sgl->addr_hi =
8899 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8900 sgl->addr_lo =
8901 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8902 sgl->word2 = le32_to_cpu(sgl->word2);
8903 bf_set(lpfc_sli4_sge_last, sgl, 1);
8904 sgl->word2 = cpu_to_le32(sgl->word2);
8905 sgl->sge_len =
8906 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8907 }
8908 return sglq->sli4_xritag;
8909 }
8912 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
8913 * @phba: Pointer to HBA context object.
8914 * @piocb: Pointer to command iocb.
8915 * @wqe: Pointer to the work queue entry.
8917 * This routine converts the iocb command to its Work Queue Entry
8918 * equivalent. The wqe pointer should not have any fields set when
8919 * this routine is called because it will memcpy over them.
8920 * This routine does not set the CQ_ID or the WQEC bits in the
8921 * wqe.
8923 * Returns: 0 = Success, IOCB_ERROR = Failure.
8926 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8927 union lpfc_wqe128 *wqe)
8929 uint32_t xmit_len = 0, total_len = 0;
8930 uint8_t ct = 0;
8931 uint32_t fip;
8932 uint32_t abort_tag;
8933 uint8_t command_type = ELS_COMMAND_NON_FIP;
8934 uint8_t cmnd;
8935 uint16_t xritag;
8936 uint16_t abrt_iotag;
8937 struct lpfc_iocbq *abrtiocbq;
8938 struct ulp_bde64 *bpl = NULL;
8939 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8940 int numBdes, i;
8941 struct ulp_bde64 bde;
8942 struct lpfc_nodelist *ndlp;
8943 uint32_t *pcmd;
8944 uint32_t if_type;
8946 fip = phba->hba_flag & HBA_FIP_SUPPORT;
8947 /* The fcp commands will set command type */
8948 if (iocbq->iocb_flag & LPFC_IO_FCP)
8949 command_type = FCP_COMMAND;
8950 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8951 command_type = ELS_COMMAND_FIP;
8953 command_type = ELS_COMMAND_NON_FIP;
8955 if (phba->fcp_embed_io)
8956 memset(wqe, 0, sizeof(union lpfc_wqe128));
8957 /* Some of the fields are in the right position already */
8958 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
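/* (Added note: only the 64-byte IOCB image is copied in here; when
 * fcp_embed_io is set, the full 128-byte WQE was zeroed above so the
 * upper words start out clean.)
 */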
8959 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8960 /* The ct field has moved so reset */
8961 wqe->generic.wqe_com.word7 = 0;
8962 wqe->generic.wqe_com.word10 = 0;
8965 abort_tag = (uint32_t) iocbq->iotag;
8966 xritag = iocbq->sli4_xritag;
8967 /* words0-2 bpl convert bde */
8968 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8969 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8970 sizeof(struct ulp_bde64);
8971 bpl = (struct ulp_bde64 *)
8972 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8976 /* Should already be byte swapped. */
8977 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8978 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8979 /* swap the size field back to the cpu so we
8980 * can assign it to the sgl.
8982 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
8983 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8984 total_len = 0;
8985 for (i = 0; i < numBdes; i++) {
8986 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8987 total_len += bde.tus.f.bdeSize;
8988 }
8989 } else
8990 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8992 iocbq->iocb.ulpIoTag = iocbq->iotag;
8993 cmnd = iocbq->iocb.ulpCommand;
8995 switch (iocbq->iocb.ulpCommand) {
8996 case CMD_ELS_REQUEST64_CR:
8997 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8998 ndlp = iocbq->context_un.ndlp;
9000 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9001 if (!iocbq->iocb.ulpLe) {
9002 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9003 "2007 Only Limited Edition cmd Format"
9004 " supported 0x%x\n",
9005 iocbq->iocb.ulpCommand);
9006 return IOCB_ERROR;
9007 }
9009 wqe->els_req.payload_len = xmit_len;
9010 /* Els_request64 has a TMO */
9011 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9012 iocbq->iocb.ulpTimeout);
9013 /* Need a VF for word 4 set the vf bit*/
9014 bf_set(els_req64_vf, &wqe->els_req, 0);
9015 /* And a VFID for word 12 */
9016 bf_set(els_req64_vfid, &wqe->els_req, 0);
9017 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9018 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9019 iocbq->iocb.ulpContext);
9020 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9021 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9022 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9023 if (command_type == ELS_COMMAND_FIP)
9024 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9025 >> LPFC_FIP_ELS_ID_SHIFT);
9026 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9027 iocbq->context2)->virt);
9028 if_type = bf_get(lpfc_sli_intf_if_type,
9029 &phba->sli4_hba.sli_intf);
9030 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9031 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9032 *pcmd == ELS_CMD_SCR ||
9033 *pcmd == ELS_CMD_FDISC ||
9034 *pcmd == ELS_CMD_LOGO ||
9035 *pcmd == ELS_CMD_PLOGI)) {
9036 bf_set(els_req64_sp, &wqe->els_req, 1);
9037 bf_set(els_req64_sid, &wqe->els_req,
9038 iocbq->vport->fc_myDID);
9039 if ((*pcmd == ELS_CMD_FLOGI) &&
9040 !(phba->fc_topology ==
9041 LPFC_TOPOLOGY_LOOP))
9042 bf_set(els_req64_sid, &wqe->els_req, 0);
9043 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9044 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9045 phba->vpi_ids[iocbq->vport->vpi]);
9046 } else if (pcmd && iocbq->context1) {
9047 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9048 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9049 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9050 }
9051 }
9052 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9053 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9054 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9055 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9056 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9057 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9058 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9059 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9060 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9061 break;
9062 case CMD_XMIT_SEQUENCE64_CX:
9063 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9064 iocbq->iocb.un.ulpWord[3]);
9065 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9066 iocbq->iocb.unsli3.rcvsli3.ox_id);
9067 /* The entire sequence is transmitted for this IOCB */
9068 xmit_len = total_len;
9069 cmnd = CMD_XMIT_SEQUENCE64_CR;
9070 if (phba->link_flag & LS_LOOPBACK_MODE)
9071 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
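/* fall through - the CX case shares the rest of the CR setup below */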
9072 case CMD_XMIT_SEQUENCE64_CR:
9073 /* word3 iocb=io_tag32 wqe=reserved */
9074 wqe->xmit_sequence.rsvd3 = 0;
9075 /* word4 relative_offset memcpy */
9076 /* word5 r_ctl/df_ctl memcpy */
9077 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9078 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9079 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9080 LPFC_WQE_IOD_WRITE);
9081 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9082 LPFC_WQE_LENLOC_WORD12);
9083 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9084 wqe->xmit_sequence.xmit_len = xmit_len;
9085 command_type = OTHER_COMMAND;
9086 break;
9087 case CMD_XMIT_BCAST64_CN:
9088 /* word3 iocb=iotag32 wqe=seq_payload_len */
9089 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9090 /* word4 iocb=rsvd wqe=rsvd */
9091 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9092 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9093 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9094 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9095 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9096 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9097 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9098 LPFC_WQE_LENLOC_WORD3);
9099 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9100 break;
9101 case CMD_FCP_IWRITE64_CR:
9102 command_type = FCP_COMMAND_DATA_OUT;
9103 /* word3 iocb=iotag wqe=payload_offset_len */
9104 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9105 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9106 xmit_len + sizeof(struct fcp_rsp));
9107 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9108 0);
9109 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9110 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9111 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9112 iocbq->iocb.ulpFCP2Rcvy);
9113 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9114 /* Always open the exchange */
9115 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9116 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9117 LPFC_WQE_LENLOC_WORD4);
9118 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9119 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9120 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9121 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9122 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9123 if (iocbq->priority) {
9124 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9125 (iocbq->priority << 1));
9127 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9128 (phba->cfg_XLanePriority << 1));
9129 }
9130 }
9131 /* Note, word 10 is already initialized to 0 */
9133 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9134 if (phba->cfg_enable_pbde)
9135 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9136 else
9137 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9139 if (phba->fcp_embed_io) {
9140 struct lpfc_scsi_buf *lpfc_cmd;
9141 struct sli4_sge *sgl;
9142 struct fcp_cmnd *fcp_cmnd;
9143 uint32_t *ptr;
9145 /* 128 byte wqe support here */
9147 lpfc_cmd = iocbq->context1;
9148 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9149 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9151 /* Word 0-2 - FCP_CMND */
9152 wqe->generic.bde.tus.f.bdeFlags =
9153 BUFF_TYPE_BDE_IMMED;
9154 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9155 wqe->generic.bde.addrHigh = 0;
9156 wqe->generic.bde.addrLow = 88; /* Word 22 */
9158 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9159 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9161 /* Word 22-29 FCP CMND Payload */
9162 ptr = &wqe->words[22];
9163 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9164 }
9165 break;
9166 case CMD_FCP_IREAD64_CR:
9167 /* word3 iocb=iotag wqe=payload_offset_len */
9168 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9169 bf_set(payload_offset_len, &wqe->fcp_iread,
9170 xmit_len + sizeof(struct fcp_rsp));
9171 bf_set(cmd_buff_len, &wqe->fcp_iread,
9172 0);
9173 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9174 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9175 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9176 iocbq->iocb.ulpFCP2Rcvy);
9177 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9178 /* Always open the exchange */
9179 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9180 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9181 LPFC_WQE_LENLOC_WORD4);
9182 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9183 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9184 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9185 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9186 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9187 if (iocbq->priority) {
9188 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9189 (iocbq->priority << 1));
9191 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9192 (phba->cfg_XLanePriority << 1));
9193 }
9194 }
9195 /* Note, word 10 is already initialized to 0 */
9197 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9198 if (phba->cfg_enable_pbde)
9199 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9200 else
9201 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9203 if (phba->fcp_embed_io) {
9204 struct lpfc_scsi_buf *lpfc_cmd;
9205 struct sli4_sge *sgl;
9206 struct fcp_cmnd *fcp_cmnd;
9207 uint32_t *ptr;
9209 /* 128 byte wqe support here */
9211 lpfc_cmd = iocbq->context1;
9212 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9213 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9215 /* Word 0-2 - FCP_CMND */
9216 wqe->generic.bde.tus.f.bdeFlags =
9217 BUFF_TYPE_BDE_IMMED;
9218 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9219 wqe->generic.bde.addrHigh = 0;
9220 wqe->generic.bde.addrLow = 88; /* Word 22 */
9222 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9223 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9225 /* Word 22-29 FCP CMND Payload */
9226 ptr = &wqe->words[22];
9227 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9228 }
9229 break;
9230 case CMD_FCP_ICMND64_CR:
9231 /* word3 iocb=iotag wqe=payload_offset_len */
9232 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9233 bf_set(payload_offset_len, &wqe->fcp_icmd,
9234 xmit_len + sizeof(struct fcp_rsp));
9235 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9236 0);
9237 /* word3 iocb=IO_TAG wqe=reserved */
9238 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9239 /* Always open the exchange */
9240 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9241 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9242 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9243 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9244 LPFC_WQE_LENLOC_NONE);
9245 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9246 iocbq->iocb.ulpFCP2Rcvy);
9247 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9248 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9249 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9250 if (iocbq->priority) {
9251 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9252 (iocbq->priority << 1));
9254 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9255 (phba->cfg_XLanePriority << 1));
9256 }
9257 }
9258 /* Note, word 10 is already initialized to 0 */
9260 if (phba->fcp_embed_io) {
9261 struct lpfc_scsi_buf *lpfc_cmd;
9262 struct sli4_sge *sgl;
9263 struct fcp_cmnd *fcp_cmnd;
9264 uint32_t *ptr;
9266 /* 128 byte wqe support here */
9268 lpfc_cmd = iocbq->context1;
9269 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9270 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9272 /* Word 0-2 - FCP_CMND */
9273 wqe->generic.bde.tus.f.bdeFlags =
9274 BUFF_TYPE_BDE_IMMED;
9275 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9276 wqe->generic.bde.addrHigh = 0;
9277 wqe->generic.bde.addrLow = 88; /* Word 22 */
9279 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9280 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9282 /* Word 22-29 FCP CMND Payload */
9283 ptr = &wqe->words[22];
9284 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9285 }
9286 break;
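/* (Added note: in the three FCP cases above, fcp_embed_io places the
 * FCP_CMND payload directly in WQE words 22-29 as an immediate BDE
 * (addrLow = 88 is the byte offset of word 22), so the command travels
 * inside the WQE itself rather than being fetched through the SGL.)
 */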
9287 case CMD_GEN_REQUEST64_CR:
9288 /* For this command calculate the xmit length of the
9289 * request bde.
9290 */
9291 xmit_len = 0;
9292 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9293 sizeof(struct ulp_bde64);
9294 for (i = 0; i < numBdes; i++) {
9295 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9296 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9297 break;
9298 xmit_len += bde.tus.f.bdeSize;
9299 }
9300 /* word3 iocb=IO_TAG wqe=request_payload_len */
9301 wqe->gen_req.request_payload_len = xmit_len;
9302 /* word4 iocb=parameter wqe=relative_offset memcpy */
9303 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9304 /* word6 context tag copied in memcpy */
9305 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9306 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9307 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9308 "2015 Invalid CT %x command 0x%x\n",
9309 ct, iocbq->iocb.ulpCommand);
9310 return IOCB_ERROR;
9311 }
9312 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9313 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9314 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9315 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9316 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9317 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9318 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9319 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9320 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9321 command_type = OTHER_COMMAND;
9322 break;
9323 case CMD_XMIT_ELS_RSP64_CX:
9324 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9325 /* words0-2 BDE memcpy */
9326 /* word3 iocb=iotag32 wqe=response_payload_len */
9327 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9329 wqe->xmit_els_rsp.word4 = 0;
9330 /* word5 iocb=rsvd wge=did */
9331 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9332 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9334 if_type = bf_get(lpfc_sli_intf_if_type,
9335 &phba->sli4_hba.sli_intf);
9336 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9337 if (iocbq->vport->fc_flag & FC_PT2PT) {
9338 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9339 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9340 iocbq->vport->fc_myDID);
9341 if (iocbq->vport->fc_myDID == Fabric_DID) {
9342 bf_set(wqe_els_did,
9343 &wqe->xmit_els_rsp.wqe_dest, 0);
9344 }
9345 }
9347 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9348 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9349 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9350 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9351 iocbq->iocb.unsli3.rcvsli3.ox_id);
9352 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9353 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9354 phba->vpi_ids[iocbq->vport->vpi]);
9355 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9356 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9357 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9358 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9359 LPFC_WQE_LENLOC_WORD3);
9360 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9361 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9362 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9363 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9364 iocbq->context2)->virt);
9365 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9366 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9367 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9368 iocbq->vport->fc_myDID);
9369 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9370 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9371 phba->vpi_ids[phba->pport->vpi]);
9372 }
9373 command_type = OTHER_COMMAND;
9374 break;
9375 case CMD_CLOSE_XRI_CN:
9376 case CMD_ABORT_XRI_CN:
9377 case CMD_ABORT_XRI_CX:
9378 /* words 0-2: memcpy should be 0 (reserved) */
9379 /* port will send abts */
9380 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9381 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9382 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9383 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9384 } else
9385 fip = 0;
9387 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9388 /*
9389 * The link is down, or the command was ELS_FIP,
9390 * so the fw does not need to send abts
9391 * on the wire.
9392 */
9393 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9394 else
9395 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9396 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9397 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9398 wqe->abort_cmd.rsrvd5 = 0;
9399 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9400 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9401 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9402 /*
9403 * The abort handler will send us CMD_ABORT_XRI_CN or
9404 * CMD_CLOSE_XRI_CN, and the fw only accepts CMD_ABORT_XRI_CX.
9405 */
9406 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9407 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9408 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9409 LPFC_WQE_LENLOC_NONE);
9410 cmnd = CMD_ABORT_XRI_CX;
9411 command_type = OTHER_COMMAND;
9412 break;
9414 case CMD_XMIT_BLS_RSP64_CX:
9415 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9416 /* As BLS ABTS RSP WQE is very different from other WQEs,
9417 * we re-construct this WQE here based on information in
9418 * iocbq from scratch.
9420 memset(wqe, 0, sizeof(union lpfc_wqe));
9421 /* OX_ID is invariable to who sent ABTS to CT exchange */
9422 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9423 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9424 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9425 LPFC_ABTS_UNSOL_INT) {
9426 /* ABTS sent by initiator to CT exchange, the
9427 * RX_ID field will be filled with the newly
9428 * allocated responder XRI.
9430 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9431 iocbq->sli4_xritag);
9433 /* ABTS sent by responder to CT exchange, the
9434 * RX_ID field will be filled with the responder
9437 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9438 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9440 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9441 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9443 /* Use CT=VPI */
9444 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9445 ndlp->nlp_DID);
9446 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9447 iocbq->iocb.ulpContext);
9448 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9449 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9450 phba->vpi_ids[phba->pport->vpi]);
9451 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9452 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9453 LPFC_WQE_LENLOC_NONE);
9454 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
9455 command_type = OTHER_COMMAND;
9456 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9457 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9458 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9459 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9460 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9461 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9462 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9463 }
9464 break;
9466 case CMD_SEND_FRAME:
9467 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9468 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9469 return 0;
9470 case CMD_XRI_ABORTED_CX:
9471 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9472 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9473 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9474 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9475 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9476 default:
9477 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9478 "2014 Invalid command 0x%x\n",
9479 iocbq->iocb.ulpCommand);
9480 return IOCB_ERROR;
9481 break;
9482 }
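/* (Added note: the settings below are common to every WQE type built
 * above - DIF action, XRI, request and abort tags, command type, class,
 * and the default CQ ID.)
 */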
9484 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9485 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9486 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9487 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9488 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9489 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9490 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9491 LPFC_IO_DIF_INSERT);
9492 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9493 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9494 wqe->generic.wqe_com.abort_tag = abort_tag;
9495 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9496 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9497 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9498 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9499 return 0;
9500 }
9503 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9504 * @phba: Pointer to HBA context object.
9505 * @ring_number: SLI ring number to issue iocb on.
9506 * @piocb: Pointer to command iocb.
9507 * @flag: Flag indicating if this command can be put into txq.
9509 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9510 * an iocb command to an HBA with SLI-4 interface spec.
9512 * This function is called with hbalock held. The function will return success
9513 * after it successfully submits the iocb to firmware or after adding it to
9514 * the txq.
9517 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9518 struct lpfc_iocbq *piocb, uint32_t flag)
9519 {
9520 struct lpfc_sglq *sglq;
9521 union lpfc_wqe128 wqe;
9522 struct lpfc_queue *wq;
9523 struct lpfc_sli_ring *pring;
9526 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9527 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9528 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9529 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9530 else
9531 wq = phba->sli4_hba.oas_wq;
9532 } else {
9533 wq = phba->sli4_hba.els_wq;
9534 }
9536 /* Get corresponding ring */
9537 pring = wq->pring;
9539 /*
9540 * The WQE can be either 64 or 128 bytes.
9541 */
9543 lockdep_assert_held(&phba->hbalock);
9545 if (piocb->sli4_xritag == NO_XRI) {
9546 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9547 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9548 sglq = NULL;
9549 else {
9550 if (!list_empty(&pring->txq)) {
9551 if (!(flag & SLI_IOCB_RET_IOCB)) {
9552 __lpfc_sli_ringtx_put(phba,
9553 pring, piocb);
9554 return IOCB_SUCCESS;
9555 } else {
9556 return IOCB_BUSY;
9557 }
9558 } else {
9559 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9560 if (!sglq) {
9561 if (!(flag & SLI_IOCB_RET_IOCB)) {
9562 __lpfc_sli_ringtx_put(phba,
9563 pring,
9564 piocb);
9565 return IOCB_SUCCESS;
9566 } else
9567 return IOCB_BUSY;
9568 }
9569 }
9570 }
9571 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9572 /* These IO's already have an XRI and a mapped sgl. */
9573 sglq = NULL;
9574 else {
9575 /*
9576 * This is a continuation of a command (CX), so this
9577 * sglq is on the active list.
9578 */
9579 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9580 if (!sglq)
9581 return IOCB_ERROR;
9582 }
9584 if (sglq) {
9585 piocb->sli4_lxritag = sglq->sli4_lxritag;
9586 piocb->sli4_xritag = sglq->sli4_xritag;
9587 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9588 return IOCB_ERROR;
9589 }
9591 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9592 return IOCB_ERROR;
9594 if (lpfc_sli4_wq_put(wq, &wqe))
9595 return IOCB_ERROR;
9596 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9598 return 0;
9599 }
9602 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9604 * This routine wraps the actual lockless version of the iocb issue
9605 * function pointer from the lpfc_hba struct.
9608 * IOCB_ERROR - Error
9609 * IOCB_SUCCESS - Success
9613 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9614 struct lpfc_iocbq *piocb, uint32_t flag)
9616 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9620 * lpfc_sli_api_table_setup - Set up sli api function jump table
9621 * @phba: The hba struct for which this call is being executed.
9622 * @dev_grp: The HBA PCI-Device group number.
9624 * This routine sets up the SLI interface API function jump table in @phba
9625 * struct.
9626 * Returns: 0 - success, -ENODEV - failure.
9629 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9630 {
9632 switch (dev_grp) {
9633 case LPFC_PCI_DEV_LP:
9634 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9635 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9636 break;
9637 case LPFC_PCI_DEV_OC:
9638 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9639 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9640 break;
9641 default:
9642 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9643 "1419 Invalid HBA PCI-device group: 0x%x\n",
9648 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9653 * lpfc_sli4_calc_ring - Calculates which ring to use
9654 * @phba: Pointer to HBA context object.
9655 * @piocb: Pointer to command iocb.
9657 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9658 * hba_wqidx, thus we need to calculate the corresponding ring.
9659 * Since ABORTS must go on the same WQ of the command they are
9660 * aborting, we use command's hba_wqidx.
9662 struct lpfc_sli_ring *
9663 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9665 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9666 if (!(phba->cfg_fof) ||
9667 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9668 if (unlikely(!phba->sli4_hba.fcp_wq))
9669 return NULL;
9670 /*
9671 * for abort iocb hba_wqidx should already
9672 * be setup based on what work queue we used.
9673 */
9674 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9675 piocb->hba_wqidx =
9676 lpfc_sli4_scmd_to_wqidx_distr(phba,
9677 piocb->context1);
9678 piocb->hba_wqidx = piocb->hba_wqidx %
9679 phba->cfg_fcp_io_channel;
9680 }
9681 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9682 } else {
9683 if (unlikely(!phba->sli4_hba.oas_wq))
9684 return NULL;
9685 piocb->hba_wqidx = 0;
9686 return phba->sli4_hba.oas_wq->pring;
9687 }
9688 } else {
9689 if (unlikely(!phba->sli4_hba.els_wq))
9690 return NULL;
9691 piocb->hba_wqidx = 0;
9692 return phba->sli4_hba.els_wq->pring;
9693 }
9694 }
9697 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9698 * @phba: Pointer to HBA context object.
9699 * @pring: Pointer to driver SLI ring object.
9700 * @piocb: Pointer to command iocb.
9701 * @flag: Flag indicating if this command can be put into txq.
9703 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9704 * function. This function gets the hbalock and calls
9705 * __lpfc_sli_issue_iocb function and will return the error returned
9706 * by __lpfc_sli_issue_iocb function. This wrapper is used by
9707 * functions which do not hold hbalock.
9710 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9711 struct lpfc_iocbq *piocb, uint32_t flag)
9713 struct lpfc_hba_eq_hdl *hba_eq_hdl;
9714 struct lpfc_sli_ring *pring;
9715 struct lpfc_queue *fpeq;
9716 struct lpfc_eqe *eqe;
9717 unsigned long iflags;
9718 int rc, idx;
9720 if (phba->sli_rev == LPFC_SLI_REV4) {
9721 pring = lpfc_sli4_calc_ring(phba, piocb);
9722 if (unlikely(pring == NULL))
9723 return IOCB_ERROR;
9725 spin_lock_irqsave(&pring->ring_lock, iflags);
9726 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9727 spin_unlock_irqrestore(&pring->ring_lock, iflags);
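/* (Added note: the look-ahead block below is an optimization - if no
 * one else is using this EQ, poll and process any pending completions
 * inline instead of waiting for the interrupt handler.)
 */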
9729 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
9730 idx = piocb->hba_wqidx;
9731 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
9733 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
9735 /* Get associated EQ with this index */
9736 fpeq = phba->sli4_hba.hba_eq[idx];
9738 /* Turn off interrupts from this EQ */
9739 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
9741 /*
9742 * Process all the events on FCP EQ
9743 */
9744 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9745 lpfc_sli4_hba_handle_eqe(phba,
9746 eqe, idx);
9747 fpeq->EQ_processed++;
9748 }
9750 /* Always clear and re-arm the EQ */
9751 phba->sli4_hba.sli4_eq_release(fpeq,
9752 LPFC_QUEUE_REARM);
9753 }
9754 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
9755 }
9756 } else {
9757 /* For now, SLI2/3 will still use hbalock */
9758 spin_lock_irqsave(&phba->hbalock, iflags);
9759 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9760 spin_unlock_irqrestore(&phba->hbalock, iflags);
9761 }
9762 return rc;
9763 }
9766 * lpfc_extra_ring_setup - Extra ring setup function
9767 * @phba: Pointer to HBA context object.
9769 * This function is called while driver attaches with the
9770 * HBA to setup the extra ring. The extra ring is used
9771 * only when driver needs to support target mode functionality
9772 * or IP over FC functionalities.
9774 * This function is called with no lock held. SLI3 only.
9777 lpfc_extra_ring_setup(struct lpfc_hba *phba)
9778 {
9779 struct lpfc_sli *psli;
9780 struct lpfc_sli_ring *pring;
9782 psli = &phba->sli;
9784 /* Adjust cmd/rsp ring iocb entries more evenly */
9786 /* Take some away from the FCP ring */
9787 pring = &psli->sli3_ring[LPFC_FCP_RING];
9788 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9789 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9790 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9791 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9793 /* and give them to the extra ring */
9794 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
9796 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9797 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9798 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9799 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9801 /* Setup default profile for this ring */
9802 pring->iotag_max = 4096;
9803 pring->num_mask = 1;
9804 pring->prt[0].profile = 0; /* Mask 0 */
9805 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9806 pring->prt[0].type = phba->cfg_multi_ring_type;
9807 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9808 return 0;
9809 }
9811 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9812 * @phba: Pointer to HBA context object.
9813 * @iocbq: Pointer to iocb object.
9815 * The async_event handler calls this routine when it receives
9816 * an ASYNC_STATUS_CN event from the port. The port generates
9817 * this event when an Abort Sequence request to an rport fails
9818 * twice in succession. The abort could be originated by the
9819 * driver or by the port. The ABTS could have been for an ELS
9820 * or FCP IO. The port only generates this event when an ABTS
9821 * fails to complete after one retry.
9824 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9825 struct lpfc_iocbq *iocbq)
9827 struct lpfc_nodelist *ndlp = NULL;
9828 uint16_t rpi = 0, vpi = 0;
9829 struct lpfc_vport *vport = NULL;
9831 /* The rpi in the ulpContext is vport-sensitive. */
9832 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9833 rpi = iocbq->iocb.ulpContext;
9835 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9836 "3092 Port generated ABTS async event "
9837 "on vpi %d rpi %d status 0x%x\n",
9838 vpi, rpi, iocbq->iocb.ulpStatus);
9840 vport = lpfc_find_vport_by_vpid(phba, vpi);
9841 if (!vport)
9842 goto err_exit;
9843 ndlp = lpfc_findnode_rpi(vport, rpi);
9844 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9845 goto err_exit;
9847 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9848 lpfc_sli_abts_recover_port(vport, ndlp);
9849 return;
9851 err_exit:
9852 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9853 "3095 Event Context not found, no "
9854 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9855 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9856 vpi, rpi);
9857 }
9859 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9860 * @phba: pointer to HBA context object.
9861 * @ndlp: nodelist pointer for the impacted rport.
9862 * @axri: pointer to the wcqe containing the failed exchange.
9864 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9865 * port. The port generates this event when an abort exchange request to an
9866 * rport fails twice in succession with no reply. The abort could be originated
9867 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
9870 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9871 struct lpfc_nodelist *ndlp,
9872 struct sli4_wcqe_xri_aborted *axri)
9874 struct lpfc_vport *vport;
9875 uint32_t ext_status = 0;
9877 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9878 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9879 "3115 Node Context not found, driver "
9880 "ignoring abts err event\n");
9884 vport = ndlp->vport;
9885 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9886 "3116 Port generated FCP XRI ABORT event on "
9887 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9888 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9889 bf_get(lpfc_wcqe_xa_xri, axri),
9890 bf_get(lpfc_wcqe_xa_status, axri),
9894 * Catch the ABTS protocol failure case. Older OCe FW releases returned
9895 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9896 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9898 ext_status = axri->parameter & IOERR_PARAM_MASK;
9899 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9900 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9901 lpfc_sli_abts_recover_port(vport, ndlp);
9905 * lpfc_sli_async_event_handler - ASYNC iocb handler function
9906 * @phba: Pointer to HBA context object.
9907 * @pring: Pointer to driver SLI ring object.
9908 * @iocbq: Pointer to iocb object.
9910 * This function is called by the slow ring event handler
9911 * function when there is an ASYNC event iocb in the ring.
9912 * This function is called with no lock held.
9913 * Currently this function handles only temperature related
9914 * ASYNC events. The function decodes the temperature sensor
9915 * event message and posts events for the management applications.
9918 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
9919 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
9923 struct temp_event temp_event_data;
9924 struct Scsi_Host *shost;
9927 icmd = &iocbq->iocb;
9928 evt_code = icmd->un.asyncstat.evt_code;
9931 case ASYNC_TEMP_WARN:
9932 case ASYNC_TEMP_SAFE:
9933 temp_event_data.data = (uint32_t) icmd->ulpContext;
9934 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9935 if (evt_code == ASYNC_TEMP_WARN) {
9936 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9937 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9938 "0347 Adapter is very hot, please take "
9939 "corrective action. temperature : %d Celsius\n",
9940 (uint32_t) icmd->ulpContext);
9942 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9943 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9944 "0340 Adapter temperature is OK now. "
9945 "temperature : %d Celsius\n",
9946 (uint32_t) icmd->ulpContext);
9949 /* Send temperature change event to applications */
9950 shost = lpfc_shost_from_vport(phba->pport);
9951 fc_host_post_vendor_event(shost, fc_get_event_number(),
9952 sizeof(temp_event_data), (char *) &temp_event_data,
9955 case ASYNC_STATUS_CN:
9956 lpfc_sli_abts_err_handler(phba, iocbq);
9959 iocb_w = (uint32_t *) icmd;
9960 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9961 "0346 Ring %d handler: unexpected ASYNC_STATUS"
9963 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
9964 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
9965 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
9966 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9967 pring->ringno, icmd->un.asyncstat.evt_code,
9968 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9969 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9970 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9971 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9979 * lpfc_sli4_setup - SLI ring setup function
9980 * @phba: Pointer to HBA context object.
9982 * lpfc_sli4_setup sets up the rings of the SLI interface with
9983 * the number of iocbs per ring and iotags. This function is
9984 * called while the driver attaches to the HBA and before the
9985 * interrupts are enabled, so there is no need for locking.
9987 * This function always returns 0.
9990 lpfc_sli4_setup(struct lpfc_hba *phba)
9992 struct lpfc_sli_ring *pring;
9994 pring = phba->sli4_hba.els_wq->pring;
9995 pring->num_mask = LPFC_MAX_RING_MASK;
9996 pring->prt[0].profile = 0; /* Mask 0 */
9997 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9998 pring->prt[0].type = FC_TYPE_ELS;
9999 pring->prt[0].lpfc_sli_rcv_unsol_event =
10000 lpfc_els_unsol_event;
10001 pring->prt[1].profile = 0; /* Mask 1 */
10002 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10003 pring->prt[1].type = FC_TYPE_ELS;
10004 pring->prt[1].lpfc_sli_rcv_unsol_event =
10005 lpfc_els_unsol_event;
10006 pring->prt[2].profile = 0; /* Mask 2 */
10007 /* NameServer Inquiry */
10008 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10010 pring->prt[2].type = FC_TYPE_CT;
10011 pring->prt[2].lpfc_sli_rcv_unsol_event =
10012 lpfc_ct_unsol_event;
10013 pring->prt[3].profile = 0; /* Mask 3 */
10014 /* NameServer response */
10015 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10017 pring->prt[3].type = FC_TYPE_CT;
10018 pring->prt[3].lpfc_sli_rcv_unsol_event =
10019 lpfc_ct_unsol_event;
10024 * lpfc_sli_setup - SLI ring setup function
10025 * @phba: Pointer to HBA context object.
10027 * lpfc_sli_setup sets up the rings of the SLI interface with
10028 * the number of iocbs per ring and iotags. This function is
10029 * called while the driver attaches to the HBA and before the
10030 * interrupts are enabled, so there is no need for locking.
10032 * This function always returns 0. SLI3 only.
10035 lpfc_sli_setup(struct lpfc_hba *phba)
10037 int i, totiocbsize = 0;
10038 struct lpfc_sli *psli = &phba->sli;
10039 struct lpfc_sli_ring *pring;
10041 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10042 psli->sli_flag = 0;
10044 psli->iocbq_lookup = NULL;
10045 psli->iocbq_lookup_len = 0;
10046 psli->last_iotag = 0;
10048 for (i = 0; i < psli->num_rings; i++) {
10049 pring = &psli->sli3_ring[i];
10051 case LPFC_FCP_RING: /* ring 0 - FCP */
10052 /* numCiocb and numRiocb are used in config_port */
10053 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10054 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10055 pring->sli.sli3.numCiocb +=
10056 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10057 pring->sli.sli3.numRiocb +=
10058 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10059 pring->sli.sli3.numCiocb +=
10060 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10061 pring->sli.sli3.numRiocb +=
10062 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10063 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10064 SLI3_IOCB_CMD_SIZE :
10065 SLI2_IOCB_CMD_SIZE;
10066 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10067 SLI3_IOCB_RSP_SIZE :
10068 SLI2_IOCB_RSP_SIZE;
10069 pring->iotag_ctr = 0;
10071 (phba->cfg_hba_queue_depth * 2);
10072 pring->fast_iotag = pring->iotag_max;
10073 pring->num_mask = 0;
10075 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10076 /* numCiocb and numRiocb are used in config_port */
10077 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10078 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10079 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10080 SLI3_IOCB_CMD_SIZE :
10081 SLI2_IOCB_CMD_SIZE;
10082 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10083 SLI3_IOCB_RSP_SIZE :
10084 SLI2_IOCB_RSP_SIZE;
10085 pring->iotag_max = phba->cfg_hba_queue_depth;
10086 pring->num_mask = 0;
10088 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10089 /* numCiocb and numRiocb are used in config_port */
10090 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10091 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10092 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10093 SLI3_IOCB_CMD_SIZE :
10094 SLI2_IOCB_CMD_SIZE;
10095 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10096 SLI3_IOCB_RSP_SIZE :
10097 SLI2_IOCB_RSP_SIZE;
10098 pring->fast_iotag = 0;
10099 pring->iotag_ctr = 0;
10100 pring->iotag_max = 4096;
10101 pring->lpfc_sli_rcv_async_status =
10102 lpfc_sli_async_event_handler;
10103 pring->num_mask = LPFC_MAX_RING_MASK;
10104 pring->prt[0].profile = 0; /* Mask 0 */
10105 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10106 pring->prt[0].type = FC_TYPE_ELS;
10107 pring->prt[0].lpfc_sli_rcv_unsol_event =
10108 lpfc_els_unsol_event;
10109 pring->prt[1].profile = 0; /* Mask 1 */
10110 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10111 pring->prt[1].type = FC_TYPE_ELS;
10112 pring->prt[1].lpfc_sli_rcv_unsol_event =
10113 lpfc_els_unsol_event;
10114 pring->prt[2].profile = 0; /* Mask 2 */
10115 /* NameServer Inquiry */
10116 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10118 pring->prt[2].type = FC_TYPE_CT;
10119 pring->prt[2].lpfc_sli_rcv_unsol_event =
10120 lpfc_ct_unsol_event;
10121 pring->prt[3].profile = 0; /* Mask 3 */
10122 /* NameServer response */
10123 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10125 pring->prt[3].type = FC_TYPE_CT;
10126 pring->prt[3].lpfc_sli_rcv_unsol_event =
10127 lpfc_ct_unsol_event;
10130 totiocbsize += (pring->sli.sli3.numCiocb *
10131 pring->sli.sli3.sizeCiocb) +
10132 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10134 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10135 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10136 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10137 "SLI2 SLIM Data: x%x x%lx\n",
10138 phba->brd_no, totiocbsize,
10139 (unsigned long) MAX_SLIM_IOCB_SIZE);
10141 if (phba->cfg_multi_ring_support == 2)
10142 lpfc_extra_ring_setup(phba);
10148 * lpfc_sli4_queue_init - Queue initialization function
10149 * @phba: Pointer to HBA context object.
10151 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10152 * ring. This function also initializes ring indices of each ring.
10153 * This function is called during the initialization of the SLI
10154 * interface of an HBA.
10155 * This function is called with no lock held and always returns
10159 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10161 struct lpfc_sli *psli;
10162 struct lpfc_sli_ring *pring;
10166 spin_lock_irq(&phba->hbalock);
10167 INIT_LIST_HEAD(&psli->mboxq);
10168 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10169 /* Initialize list headers for txq and txcmplq as double linked lists */
10170 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10171 pring = phba->sli4_hba.fcp_wq[i]->pring;
10173 pring->ringno = LPFC_FCP_RING;
10174 INIT_LIST_HEAD(&pring->txq);
10175 INIT_LIST_HEAD(&pring->txcmplq);
10176 INIT_LIST_HEAD(&pring->iocb_continueq);
10177 spin_lock_init(&pring->ring_lock);
10179 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10180 pring = phba->sli4_hba.nvme_wq[i]->pring;
10182 pring->ringno = LPFC_FCP_RING;
10183 INIT_LIST_HEAD(&pring->txq);
10184 INIT_LIST_HEAD(&pring->txcmplq);
10185 INIT_LIST_HEAD(&pring->iocb_continueq);
10186 spin_lock_init(&pring->ring_lock);
10188 pring = phba->sli4_hba.els_wq->pring;
10190 pring->ringno = LPFC_ELS_RING;
10191 INIT_LIST_HEAD(&pring->txq);
10192 INIT_LIST_HEAD(&pring->txcmplq);
10193 INIT_LIST_HEAD(&pring->iocb_continueq);
10194 spin_lock_init(&pring->ring_lock);
10196 if (phba->cfg_nvme_io_channel) {
10197 pring = phba->sli4_hba.nvmels_wq->pring;
10199 pring->ringno = LPFC_ELS_RING;
10200 INIT_LIST_HEAD(&pring->txq);
10201 INIT_LIST_HEAD(&pring->txcmplq);
10202 INIT_LIST_HEAD(&pring->iocb_continueq);
10203 spin_lock_init(&pring->ring_lock);
10206 if (phba->cfg_fof) {
10207 pring = phba->sli4_hba.oas_wq->pring;
10209 pring->ringno = LPFC_FCP_RING;
10210 INIT_LIST_HEAD(&pring->txq);
10211 INIT_LIST_HEAD(&pring->txcmplq);
10212 INIT_LIST_HEAD(&pring->iocb_continueq);
10213 spin_lock_init(&pring->ring_lock);
10216 spin_unlock_irq(&phba->hbalock);
10220 * lpfc_sli_queue_init - Queue initialization function
10221 * @phba: Pointer to HBA context object.
10223 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10224 * ring. This function also initializes ring indices of each ring.
10225 * This function is called during the initialization of the SLI
10226 * interface of an HBA.
10227 * This function is called with no lock held and always returns
10231 lpfc_sli_queue_init(struct lpfc_hba *phba)
10233 struct lpfc_sli *psli;
10234 struct lpfc_sli_ring *pring;
10238 spin_lock_irq(&phba->hbalock);
10239 INIT_LIST_HEAD(&psli->mboxq);
10240 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10241 /* Initialize list headers for txq and txcmplq as double linked lists */
10242 for (i = 0; i < psli->num_rings; i++) {
10243 pring = &psli->sli3_ring[i];
10245 pring->sli.sli3.next_cmdidx = 0;
10246 pring->sli.sli3.local_getidx = 0;
10247 pring->sli.sli3.cmdidx = 0;
10248 INIT_LIST_HEAD(&pring->iocb_continueq);
10249 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10250 INIT_LIST_HEAD(&pring->postbufq);
10252 INIT_LIST_HEAD(&pring->txq);
10253 INIT_LIST_HEAD(&pring->txcmplq);
10254 spin_lock_init(&pring->ring_lock);
10256 spin_unlock_irq(&phba->hbalock);
10260 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10261 * @phba: Pointer to HBA context object.
10263 * This routine flushes the mailbox command subsystem. It will unconditionally
10264 * flush all the mailbox commands in the three possible stages in the mailbox
10265 * command sub-system: pending mailbox command queue; the outstanding mailbox
10266 * command; and the completed mailbox command queue. It is the caller's responsibility
10267 * to make sure that the driver is in the proper state to flush the mailbox
10268 * command sub-system. Namely, the posting of mailbox commands into the
10269 * pending mailbox command queue from the various clients must be stopped;
10270 * either the HBA is in a state in which it will never work on the outstanding
10271 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10272 * mailbox command has been completed.
10275 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10277 LIST_HEAD(completions);
10278 struct lpfc_sli *psli = &phba->sli;
10280 unsigned long iflag;
10282 /* Flush all the mailbox commands in the mbox system */
10283 spin_lock_irqsave(&phba->hbalock, iflag);
10284 /* The pending mailbox command queue */
10285 list_splice_init(&phba->sli.mboxq, &completions);
10286 /* The outstanding active mailbox command */
10287 if (psli->mbox_active) {
10288 list_add_tail(&psli->mbox_active->list, &completions);
10289 psli->mbox_active = NULL;
10290 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10292 /* The completed mailbox command queue */
10293 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10294 spin_unlock_irqrestore(&phba->hbalock, iflag);
10296 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10297 while (!list_empty(&completions)) {
10298 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10299 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10300 if (pmb->mbox_cmpl)
10301 pmb->mbox_cmpl(phba, pmb);
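/*
 * Editor's note -- illustrative sketch only, not driver code.  The flush
 * above uses a common idiom: splice the shared lists onto a private list
 * under the lock, then complete the entries with no lock held.  A minimal
 * generic form of the idiom, with a hypothetical 'item' type:
 */
#if 0
struct item {
	struct list_head list;
};

static void flush_pending(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(local);
	struct item *it, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &local);	/* steal the whole list */
	spin_unlock_irqrestore(lock, flags);

	/* walk the private list lock-free; completions may re-take locks */
	list_for_each_entry_safe(it, next, &local, list) {
		list_del_init(&it->list);
		/* ... invoke the entry's completion here ... */
	}
}
#endif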
10306 * lpfc_sli_host_down - Vport cleanup function
10307 * @vport: Pointer to virtual port object.
10309 * lpfc_sli_host_down is called to clean up the resources
10310 * associated with a vport before destroying virtual
10311 * port data structures.
10312 * This function does the following operations:
10313 * - Free discovery resources associated with this virtual
10315 * - Free iocbs associated with this virtual port in
10317 * - Send abort for all iocb commands associated with this
10318 * vport in txcmplq.
10320 * This function is called with no lock held and always returns 1.
10323 lpfc_sli_host_down(struct lpfc_vport *vport)
10325 LIST_HEAD(completions);
10326 struct lpfc_hba *phba = vport->phba;
10327 struct lpfc_sli *psli = &phba->sli;
10328 struct lpfc_queue *qp = NULL;
10329 struct lpfc_sli_ring *pring;
10330 struct lpfc_iocbq *iocb, *next_iocb;
10332 unsigned long flags = 0;
10333 uint16_t prev_pring_flag;
10335 lpfc_cleanup_discovery_resources(vport);
10337 spin_lock_irqsave(&phba->hbalock, flags);
10340 * Error everything on the txq since these iocbs
10341 * have not been given to the FW yet.
10342 * Also issue ABTS for everything on the txcmplq
10344 if (phba->sli_rev != LPFC_SLI_REV4) {
10345 for (i = 0; i < psli->num_rings; i++) {
10346 pring = &psli->sli3_ring[i];
10347 prev_pring_flag = pring->flag;
10348 /* Only slow rings */
10349 if (pring->ringno == LPFC_ELS_RING) {
10350 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10351 /* Set the lpfc data pending flag */
10352 set_bit(LPFC_DATA_READY, &phba->data_flags);
10354 list_for_each_entry_safe(iocb, next_iocb,
10355 &pring->txq, list) {
10356 if (iocb->vport != vport)
10358 list_move_tail(&iocb->list, &completions);
10360 list_for_each_entry_safe(iocb, next_iocb,
10361 &pring->txcmplq, list) {
10362 if (iocb->vport != vport)
10364 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10366 pring->flag = prev_pring_flag;
10369 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10373 if (pring == phba->sli4_hba.els_wq->pring) {
10374 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10375 /* Set the lpfc data pending flag */
10376 set_bit(LPFC_DATA_READY, &phba->data_flags);
10378 prev_pring_flag = pring->flag;
10379 spin_lock_irq(&pring->ring_lock);
10380 list_for_each_entry_safe(iocb, next_iocb,
10381 &pring->txq, list) {
10382 if (iocb->vport != vport)
10384 list_move_tail(&iocb->list, &completions);
10386 spin_unlock_irq(&pring->ring_lock);
10387 list_for_each_entry_safe(iocb, next_iocb,
10388 &pring->txcmplq, list) {
10389 if (iocb->vport != vport)
10391 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10393 pring->flag = prev_pring_flag;
10396 spin_unlock_irqrestore(&phba->hbalock, flags);
10398 /* Cancel all the IOCBs from the completions list */
10399 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10405 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10406 * @phba: Pointer to HBA context object.
10408 * This function cleans up all iocb, buffers, mailbox commands
10409 * while shutting down the HBA. This function is called with no
10410 * lock held and always returns 1.
10411 * This function does the following to cleanup driver resources:
10412 * - Free discovery resources for each virtual port
10413 * - Cleanup any pending fabric iocbs
10414 * - Iterate through the iocb txq and free each entry
10416 * - Free up any buffer posted to the HBA
10417 * - Free mailbox commands in the mailbox queue.
10420 lpfc_sli_hba_down(struct lpfc_hba *phba)
10422 LIST_HEAD(completions);
10423 struct lpfc_sli *psli = &phba->sli;
10424 struct lpfc_queue *qp = NULL;
10425 struct lpfc_sli_ring *pring;
10426 struct lpfc_dmabuf *buf_ptr;
10427 unsigned long flags = 0;
10430 /* Shutdown the mailbox command sub-system */
10431 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10433 lpfc_hba_down_prep(phba);
10435 lpfc_fabric_abort_hba(phba);
10437 spin_lock_irqsave(&phba->hbalock, flags);
10440 * Error everything on the txq since these iocbs
10441 * have not been given to the FW yet.
10443 if (phba->sli_rev != LPFC_SLI_REV4) {
10444 for (i = 0; i < psli->num_rings; i++) {
10445 pring = &psli->sli3_ring[i];
10446 /* Only slow rings */
10447 if (pring->ringno == LPFC_ELS_RING) {
10448 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10449 /* Set the lpfc data pending flag */
10450 set_bit(LPFC_DATA_READY, &phba->data_flags);
10452 list_splice_init(&pring->txq, &completions);
10455 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10459 spin_lock_irq(&pring->ring_lock);
10460 list_splice_init(&pring->txq, &completions);
10461 spin_unlock_irq(&pring->ring_lock);
10462 if (pring == phba->sli4_hba.els_wq->pring) {
10463 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10464 /* Set the lpfc data pending flag */
10465 set_bit(LPFC_DATA_READY, &phba->data_flags);
10469 spin_unlock_irqrestore(&phba->hbalock, flags);
10471 /* Cancel all the IOCBs from the completions list */
10472 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10475 spin_lock_irqsave(&phba->hbalock, flags);
10476 list_splice_init(&phba->elsbuf, &completions);
10477 phba->elsbuf_cnt = 0;
10478 phba->elsbuf_prev_cnt = 0;
10479 spin_unlock_irqrestore(&phba->hbalock, flags);
10481 while (!list_empty(&completions)) {
10482 list_remove_head(&completions, buf_ptr,
10483 struct lpfc_dmabuf, list);
10484 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10488 /* Return any active mbox cmds */
10489 del_timer_sync(&psli->mbox_tmo);
10491 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10492 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10493 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10499 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10500 * @srcp: Source memory pointer.
10501 * @destp: Destination memory pointer.
10502 * @cnt: Number of bytes to copy, stepped one 32-bit word at a time.
10504 * This function is used for copying data between driver memory
10505 * and the SLI memory. This function also changes the endianness
10506 * of each word if native endianness is different from SLI
10507 * endianness. This function can be called with or without
10511 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10513 uint32_t *src = srcp;
10514 uint32_t *dest = destp;
10518 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10520 ldata = le32_to_cpu(ldata);
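/*
 * Editor's note -- illustrative sketch of the copy idiom above, not driver
 * code: advance through the buffers one 32-bit word at a time, converting
 * each word so the destination has the endianness the SLI interface
 * expects (le32_to_cpu is a no-op on little-endian hosts):
 */
#if 0
static void word_copy_example(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src++;
		ldata = le32_to_cpu(ldata);
		*dest++ = ldata;
	}
}
#endif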
10529 * lpfc_sli_bemem_bcopy - SLI memory copy function
10530 * @srcp: Source memory pointer.
10531 * @destp: Destination memory pointer.
10532 * @cnt: Number of bytes to copy, stepped one 32-bit word at a time.
10534 * This function is used for copying data from a data structure
10535 * with big endian representation to local endianness.
10536 * This function can be called with or without lock.
10539 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10541 uint32_t *src = srcp;
10542 uint32_t *dest = destp;
10546 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10548 ldata = be32_to_cpu(ldata);
10556 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10557 * @phba: Pointer to HBA context object.
10558 * @pring: Pointer to driver SLI ring object.
10559 * @mp: Pointer to driver buffer object.
10561 * This function is called with no lock held.
10562 * It always returns zero after adding the buffer to the postbufq
10566 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10567 struct lpfc_dmabuf *mp)
10569 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10571 spin_lock_irq(&phba->hbalock);
10572 list_add_tail(&mp->list, &pring->postbufq);
10573 pring->postbufq_cnt++;
10574 spin_unlock_irq(&phba->hbalock);
10579 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10580 * @phba: Pointer to HBA context object.
10582 * When HBQ is enabled, buffers are searched based on tags. This function
10583 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10584 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10585 * does not conflict with tags of buffers posted for unsolicited events.
10586 * The function returns the allocated tag. The function is called with
10590 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10592 spin_lock_irq(&phba->hbalock);
10593 phba->buffer_tag_count++;
10595 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10596 * from a tag assigned by the HBQ.
10598 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10599 spin_unlock_irq(&phba->hbalock);
10600 return phba->buffer_tag_count;
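/*
 * Editor's note -- hypothetical caller showing the tagged-buffer round
 * trip with the helpers above and below: allocate a tag, attach it to the
 * buffer before posting it with a CMD_QUE_XRI64_CX iocb, and recover the
 * buffer from the tag carried in the CMD_IOCB_RET_XRI64_CX response:
 */
#if 0
static void tagged_buf_example(struct lpfc_hba *phba,
			       struct lpfc_sli_ring *pring,
			       struct lpfc_dmabuf *mp)
{
	struct lpfc_dmabuf *found;
	uint32_t tag;

	tag = lpfc_sli_get_buffer_tag(phba);	/* QUE_BUFTAG_BIT already set */
	mp->buffer_tag = tag;
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ... later, in the RET_XRI64_CX handler, 'tag' arrives back ... */
	found = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	if (found) {
		/* found == mp; hand the DMAed data to the consumer */
	}
}
#endif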
10604 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10605 * @phba: Pointer to HBA context object.
10606 * @pring: Pointer to driver SLI ring object.
10607 * @tag: Buffer tag.
10609 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10610 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10611 * iocb is posted to the response ring with the tag of the buffer.
10612 * This function searches the pring->postbufq list using the tag
10613 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10614 * iocb. If the buffer is found then the lpfc_dmabuf object of the
10615 * buffer is returned to the caller; otherwise NULL is returned.
10616 * This function is called with no lock held.
10618 struct lpfc_dmabuf *
10619 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10622 struct lpfc_dmabuf *mp, *next_mp;
10623 struct list_head *slp = &pring->postbufq;
10625 /* Search postbufq, from the beginning, looking for a match on tag */
10626 spin_lock_irq(&phba->hbalock);
10627 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10628 if (mp->buffer_tag == tag) {
10629 list_del_init(&mp->list);
10630 pring->postbufq_cnt--;
10631 spin_unlock_irq(&phba->hbalock);
10636 spin_unlock_irq(&phba->hbalock);
10637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10638 "0402 Cannot find virtual addr for buffer tag on "
10639 "ring %d Data x%lx x%p x%p x%x\n",
10640 pring->ringno, (unsigned long) tag,
10641 slp->next, slp->prev, pring->postbufq_cnt);
10647 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10648 * @phba: Pointer to HBA context object.
10649 * @pring: Pointer to driver SLI ring object.
10650 * @phys: DMA address of the buffer.
10652 * This function searches the buffer list using the dma_address
10653 * of unsolicited event to find the driver's lpfc_dmabuf object
10654 * corresponding to the dma_address. The function returns the
10655 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
10656 * This function is called by the ct and els unsolicited event
10657 * handlers to get the buffer associated with the unsolicited
10660 * This function is called with no lock held.
10662 struct lpfc_dmabuf *
10663 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10666 struct lpfc_dmabuf *mp, *next_mp;
10667 struct list_head *slp = &pring->postbufq;
10669 /* Search postbufq, from the beginning, looking for a match on phys */
10670 spin_lock_irq(&phba->hbalock);
10671 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10672 if (mp->phys == phys) {
10673 list_del_init(&mp->list);
10674 pring->postbufq_cnt--;
10675 spin_unlock_irq(&phba->hbalock);
10680 spin_unlock_irq(&phba->hbalock);
10681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10682 "0410 Cannot find virtual addr for mapped buf on "
10683 "ring %d Data x%llx x%p x%p x%x\n",
10684 pring->ringno, (unsigned long long)phys,
10685 slp->next, slp->prev, pring->postbufq_cnt);
10690 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
10691 * @phba: Pointer to HBA context object.
10692 * @cmdiocb: Pointer to driver command iocb object.
10693 * @rspiocb: Pointer to driver response iocb object.
10695 * This function is the completion handler for the abort iocbs for
10696 * ELS commands. This function is called from the ELS ring event
10697 * handler with no lock held. This function frees memory resources
10698 * associated with the abort iocb.
10701 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10702 struct lpfc_iocbq *rspiocb)
10704 IOCB_t *irsp = &rspiocb->iocb;
10705 uint16_t abort_iotag, abort_context;
10706 struct lpfc_iocbq *abort_iocb = NULL;
10708 if (irsp->ulpStatus) {
10711 * Assume that the port already completed and returned, or
10712 * will return the iocb. Just log the message.
10714 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10715 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10717 spin_lock_irq(&phba->hbalock);
10718 if (phba->sli_rev < LPFC_SLI_REV4) {
10719 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
10720 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
10721 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
10722 spin_unlock_irq(&phba->hbalock);
10725 if (abort_iotag != 0 &&
10726 abort_iotag <= phba->sli.last_iotag)
10728 phba->sli.iocbq_lookup[abort_iotag];
10730 /* For sli4 the abort_tag is the XRI,
10731 * so the abort routine puts the iotag of the iocb
10732 * being aborted in the context field of the abort
10735 abort_iocb = phba->sli.iocbq_lookup[abort_context];
10737 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10738 "0327 Cannot abort els iocb %p "
10739 "with tag %x context %x, abort status %x, "
10741 abort_iocb, abort_iotag, abort_context,
10742 irsp->ulpStatus, irsp->un.ulpWord[4]);
10744 spin_unlock_irq(&phba->hbalock);
10747 lpfc_sli_release_iocbq(phba, cmdiocb);
10752 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
10753 * @phba: Pointer to HBA context object.
10754 * @cmdiocb: Pointer to driver command iocb object.
10755 * @rspiocb: Pointer to driver response iocb object.
10757 * The function is called from the SLI ring event handler with no
10758 * lock held. This function is the completion handler for ELS commands
10759 * which are aborted. The function frees memory resources used for
10760 * the aborted ELS commands.
10763 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10764 struct lpfc_iocbq *rspiocb)
10766 IOCB_t *irsp = &rspiocb->iocb;
10768 /* ELS cmd tag <ulpIoTag> completes */
10769 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10770 "0139 Ignoring ELS cmd tag x%x completion Data: "
10772 irsp->ulpIoTag, irsp->ulpStatus,
10773 irsp->un.ulpWord[4], irsp->ulpTimeout);
10774 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10775 lpfc_ct_free_iocb(phba, cmdiocb);
10777 lpfc_els_free_iocb(phba, cmdiocb);
10782 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
10783 * @phba: Pointer to HBA context object.
10784 * @pring: Pointer to driver SLI ring object.
10785 * @cmdiocb: Pointer to driver command iocb object.
10787 * This function issues an abort iocb for the provided command iocb down to
10788 * the port. Other than the case where the outstanding command iocb is an abort
10789 * request, this function issues the abort unconditionally. This function is
10790 * called with hbalock held. The function returns 0 when it fails due to
10791 * memory allocation failure or when the command iocb is an abort request.
10794 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10795 struct lpfc_iocbq *cmdiocb)
10797 struct lpfc_vport *vport = cmdiocb->vport;
10798 struct lpfc_iocbq *abtsiocbp;
10799 IOCB_t *icmd = NULL;
10800 IOCB_t *iabt = NULL;
10802 unsigned long iflags;
10803 struct lpfc_nodelist *ndlp;
10805 lockdep_assert_held(&phba->hbalock);
10808 * There are certain command types we don't want to abort. And we
10809 * don't want to abort commands that are already in the process of
10812 icmd = &cmdiocb->iocb;
10813 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10814 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10815 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10818 /* issue ABTS for this IOCB based on iotag */
10819 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10820 if (abtsiocbp == NULL)
10823 /* This signals the response to set the correct status
10824 * before calling the completion handler
10826 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10828 iabt = &abtsiocbp->iocb;
10829 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10830 iabt->un.acxri.abortContextTag = icmd->ulpContext;
10831 if (phba->sli_rev == LPFC_SLI_REV4) {
10832 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10833 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10835 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10836 if (pring->ringno == LPFC_ELS_RING) {
10837 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
10838 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
10842 iabt->ulpClass = icmd->ulpClass;
10844 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10845 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10846 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10847 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10848 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10849 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10851 if (phba->link_state >= LPFC_LINK_UP)
10852 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10854 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10856 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10857 abtsiocbp->vport = vport;
10859 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10860 "0339 Abort xri x%x, original iotag x%x, "
10861 "abort cmd iotag x%x\n",
10862 iabt->un.acxri.abortIoTag,
10863 iabt->un.acxri.abortContextTag,
10866 if (phba->sli_rev == LPFC_SLI_REV4) {
10867 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10868 if (unlikely(pring == NULL))
10870 /* Note: both hbalock and ring_lock need to be held here */
10871 spin_lock_irqsave(&pring->ring_lock, iflags);
10872 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10874 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10876 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10881 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10884 * The caller of this routine should check for IOCB_ERROR
10885 * and handle it properly. This routine no longer removes the
10886 * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
10892 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10893 * @phba: Pointer to HBA context object.
10894 * @pring: Pointer to driver SLI ring object.
10895 * @cmdiocb: Pointer to driver command iocb object.
10897 * This function issues an abort iocb for the provided command iocb. In case
10898 * of unloading, the abort iocb will not be issued to commands on the ELS
10899 * ring. Instead, the callback function is changed for those commands
10900 * so that nothing happens when they finish. This function is called with
10901 * hbalock held. The function returns 0 when the command iocb is an abort
10905 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10906 struct lpfc_iocbq *cmdiocb)
10908 struct lpfc_vport *vport = cmdiocb->vport;
10909 int retval = IOCB_ERROR;
10910 IOCB_t *icmd = NULL;
10912 lockdep_assert_held(&phba->hbalock);
10915 * There are certain command types we don't want to abort. And we
10916 * don't want to abort commands that are already in the process of
10919 icmd = &cmdiocb->iocb;
10920 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10921 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10922 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10926 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10927 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10929 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10930 goto abort_iotag_exit;
10934 * If we're unloading, don't abort the iocb on the ELS ring, but change
10935 * the callback so that nothing happens when it finishes.
10937 if ((vport->load_flag & FC_UNLOADING) &&
10938 (pring->ringno == LPFC_ELS_RING)) {
10939 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10940 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10942 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10943 goto abort_iotag_exit;
10946 /* Now, we try to issue the abort to the cmdiocb out */
10947 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10951 * The caller of this routine should check for IOCB_ERROR
10952 * and handle it properly. This routine no longer removes the
10953 * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
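/*
 * Editor's note -- hypothetical caller of the routine above.  Since
 * lpfc_sli_issue_abort_iotag() asserts that hbalock is held, a typical
 * caller walks a ring's txcmplq under the lock and issues one abort per
 * iocb, as the vport and HBA teardown paths in this file do:
 */
#if 0
static void abort_ring_example(struct lpfc_hba *phba,
			       struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
#endif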
10959 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10960 * @phba: Pointer to HBA context object.
10961 * @pring: Pointer to driver SLI ring object.
10962 * @cmdiocb: Pointer to driver command iocb object.
10964 * This function issues an abort iocb for the provided command iocb down to
10965 * the port. Other than the case where the outstanding command iocb is an abort
10966 * request, this function issues the abort unconditionally. This function is
10967 * called with hbalock held. The function returns 0 when it fails due to
10968 * memory allocation failure or when the command iocb is an abort request.
10971 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10972 struct lpfc_iocbq *cmdiocb)
10974 struct lpfc_vport *vport = cmdiocb->vport;
10975 struct lpfc_iocbq *abtsiocbp;
10976 union lpfc_wqe128 *abts_wqe;
10980 * There are certain command types we don't want to abort. And we
10981 * don't want to abort commands that are already in the process of
10984 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10985 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10986 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10989 /* issue ABTS for this io based on iotag */
10990 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10991 if (abtsiocbp == NULL)
10994 /* This signals the response to set the correct status
10995 * before calling the completion handler
10997 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10999 /* Complete prepping the abort wqe and issue to the FW. */
11000 abts_wqe = &abtsiocbp->wqe;
11002 /* Clear any stale WQE contents */
11003 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
11004 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
11007 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11008 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11009 cmdiocb->iocb.ulpClass);
11011 /* word 8 - tell the FW to abort the IO associated with this
11012 * outstanding exchange ID.
11014 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11016 /* word 9 - this is the iotag for the abts_wqe completion. */
11017 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11021 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11022 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11025 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11026 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11027 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11029 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11030 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11031 abtsiocbp->vport = vport;
11032 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
11033 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
11035 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11036 "6147 Failed abts issue_wqe with status x%x "
11038 retval, cmdiocb->sli4_xritag);
11039 lpfc_sli_release_iocbq(phba, abtsiocbp);
11043 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11044 "6148 Drv Abort NVME Request Issued for "
11045 "ox_id x%x on reqtag x%x\n",
11046 cmdiocb->sli4_xritag,
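/*
 * Editor's note -- the abort WQE above is assembled with the driver's
 * bf_set() accessors, which pack a value into one named bit-field of a
 * word without disturbing its neighbours.  A generic sketch of the idea
 * with made-up shift/mask names (the real accessors are macro-generated
 * from per-field SHIFT/MASK/WORD definitions):
 */
#if 0
#define EX_FIELD_SHIFT	8
#define EX_FIELD_MASK	0x000000ff

static inline void ex_set_field(uint32_t *word, uint32_t value)
{
	*word = (*word & ~(EX_FIELD_MASK << EX_FIELD_SHIFT)) |
		((value & EX_FIELD_MASK) << EX_FIELD_SHIFT);
}
#endif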
11053 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11054 * @phba: pointer to lpfc HBA data structure.
11056 * This routine will abort all pending and outstanding iocbs to an HBA.
11059 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11061 struct lpfc_sli *psli = &phba->sli;
11062 struct lpfc_sli_ring *pring;
11063 struct lpfc_queue *qp = NULL;
11066 if (phba->sli_rev != LPFC_SLI_REV4) {
11067 for (i = 0; i < psli->num_rings; i++) {
11068 pring = &psli->sli3_ring[i];
11069 lpfc_sli_abort_iocb_ring(phba, pring);
11073 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11077 lpfc_sli_abort_iocb_ring(phba, pring);
11082 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11083 * @iocbq: Pointer to driver iocb object.
11084 * @vport: Pointer to driver virtual port object.
11085 * @tgt_id: SCSI ID of the target.
11086 * @lun_id: LUN ID of the scsi device.
11087 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11089 * This function acts as an iocb filter for functions which abort or count
11090 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11091 * 0 if the filtering criteria are met for the given iocb and will return
11092 * 1 if the filtering criteria are not met.
11093 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11094 * given iocb is for the SCSI device specified by vport, tgt_id and
11095 * lun_id parameter.
11096 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11097 * given iocb is for the SCSI target specified by vport and tgt_id
11099 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11100 * given iocb is for the SCSI host associated with the given vport.
11101 * This function is called with no locks held.
11104 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11105 uint16_t tgt_id, uint64_t lun_id,
11106 lpfc_ctx_cmd ctx_cmd)
11108 struct lpfc_scsi_buf *lpfc_cmd;
11111 if (iocbq->vport != vport)
11114 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11115 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11118 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11120 if (lpfc_cmd->pCmd == NULL)
11125 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11126 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11127 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11131 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11132 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11135 case LPFC_CTX_HOST:
11139 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11140 __func__, ctx_cmd);
11148 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11149 * @vport: Pointer to virtual port.
11150 * @tgt_id: SCSI ID of the target.
11151 * @lun_id: LUN ID of the scsi device.
11152 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11154 * This function returns the number of FCP commands pending for the vport.
11155 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11156 * commands pending on the vport associated with the SCSI device specified
11157 * by the tgt_id and lun_id parameters.
11158 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11159 * commands pending on the vport associated with the SCSI target specified
11160 * by the tgt_id parameter.
11161 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11162 * commands pending on the vport.
11163 * This function returns the number of iocbs which satisfy the filter.
11164 * This function is called without any lock held.
11167 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11168 lpfc_ctx_cmd ctx_cmd)
11170 struct lpfc_hba *phba = vport->phba;
11171 struct lpfc_iocbq *iocbq;
11174 spin_lock_irq(&phba->hbalock);
11175 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11176 iocbq = phba->sli.iocbq_lookup[i];
11178 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11182 spin_unlock_irq(&phba->hbalock);
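/*
 * Editor's note -- hypothetical caller of the counter above: poll until
 * the FCP commands outstanding on one LUN have drained, e.g. while
 * waiting out a reset (real callers live in the SCSI error-handling
 * paths):
 */
#if 0
static int wait_for_lun_drain(struct lpfc_vport *vport, uint16_t tgt_id,
			      uint64_t lun_id, int max_polls)
{
	int cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);

	while (cnt && max_polls--) {
		msleep(100);
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
	}
	return cnt;	/* 0 when fully drained */
}
#endif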
11188 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11189 * @phba: Pointer to HBA context object
11190 * @cmdiocb: Pointer to command iocb object.
11191 * @rspiocb: Pointer to response iocb object.
11193 * This function is called when an aborted FCP iocb completes. This
11194 * function is called by the ring event handler with no lock held.
11195 * This function frees the iocb.
11198 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11199 struct lpfc_iocbq *rspiocb)
11201 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11202 "3096 ABORT_XRI_CN completing on rpi x%x "
11203 "original iotag x%x, abort cmd iotag x%x "
11204 "status 0x%x, reason 0x%x\n",
11205 cmdiocb->iocb.un.acxri.abortContextTag,
11206 cmdiocb->iocb.un.acxri.abortIoTag,
11207 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11208 rspiocb->iocb.un.ulpWord[4]);
11209 lpfc_sli_release_iocbq(phba, cmdiocb);
11214 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11215 * @vport: Pointer to virtual port.
11216 * @pring: Pointer to driver SLI ring object.
11217 * @tgt_id: SCSI ID of the target.
11218 * @lun_id: LUN ID of the scsi device.
11219 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11221 * This function sends an abort command for every SCSI command
11222 * associated with the given virtual port pending on the ring
11223 * filtered by the lpfc_sli_validate_fcp_iocb function.
11224 * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11225 * FCP iocbs associated with the lun specified by tgt_id and lun_id
11227 * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11228 * FCP iocbs associated with the SCSI target specified by tgt_id.
11229 * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
11230 * FCP iocbs associated with the virtual port.
11231 * This function returns the number of iocbs it failed to abort.
11232 * This function is called with no locks held.
11235 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11236 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11238 struct lpfc_hba *phba = vport->phba;
11239 struct lpfc_iocbq *iocbq;
11240 struct lpfc_iocbq *abtsiocb;
11241 struct lpfc_sli_ring *pring_s4;
11242 IOCB_t *cmd = NULL;
11243 int errcnt = 0, ret_val = 0;
11246 /* all I/Os are in the process of being flushed */
11247 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11250 for (i = 1; i <= phba->sli.last_iotag; i++) {
11251 iocbq = phba->sli.iocbq_lookup[i];
11253 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11258 * If the iocbq is already being aborted, don't take a second
11259 * action, but do count it.
11261 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11264 /* issue ABTS for this IOCB based on iotag */
11265 abtsiocb = lpfc_sli_get_iocbq(phba);
11266 if (abtsiocb == NULL) {
11271 /* indicate the IO is being aborted by the driver. */
11272 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11274 cmd = &iocbq->iocb;
11275 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11276 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11277 if (phba->sli_rev == LPFC_SLI_REV4)
11278 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11280 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11281 abtsiocb->iocb.ulpLe = 1;
11282 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11283 abtsiocb->vport = vport;
11285 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11286 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11287 if (iocbq->iocb_flag & LPFC_IO_FCP)
11288 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11289 if (iocbq->iocb_flag & LPFC_IO_FOF)
11290 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11292 if (lpfc_is_link_up(phba))
11293 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11295 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11297 /* Setup callback routine and issue the command. */
11298 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11299 if (phba->sli_rev == LPFC_SLI_REV4) {
11300 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11303 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11306 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11308 if (ret_val == IOCB_ERROR) {
11309 lpfc_sli_release_iocbq(phba, abtsiocb);
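/*
 * Editor's note -- hypothetical sketch tying the abort and count helpers
 * together: request an ABTS for everything outstanding on a target, then
 * poll with lpfc_sli_sum_iocb() until the aborted commands have actually
 * completed back:
 */
#if 0
static void abort_target_example(struct lpfc_vport *vport,
				 struct lpfc_sli_ring *pring,
				 uint16_t tgt_id)
{
	/* return value is the count of aborts that could not be issued */
	int errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0,
					 LPFC_CTX_TGT);

	while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT))
		msleep(100);	/* lun_id is ignored for LPFC_CTX_TGT */
	(void)errcnt;
}
#endif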
11319 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11320 * @vport: Pointer to virtual port.
11321 * @pring: Pointer to driver SLI ring object.
11322 * @tgt_id: SCSI ID of the target.
11323 * @lun_id: LUN ID of the scsi device.
11324 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11326 * This function sends an abort command for every SCSI command
11327 * associated with the given virtual port pending on the ring
11328 * filtered by the lpfc_sli_validate_fcp_iocb function.
11329 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends an abort only to the
11330 * FCP iocbs associated with the lun specified by tgt_id and lun_id
11332 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11333 * FCP iocbs associated with the SCSI target specified by tgt_id.
11334 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends an abort to all
11335 * FCP iocbs associated with the virtual port.
11336 * This function returns the number of iocbs it aborted.
11337 * This function is called with no locks held right after a taskmgmt
11341 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11342 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11344 struct lpfc_hba *phba = vport->phba;
11345 struct lpfc_scsi_buf *lpfc_cmd;
11346 struct lpfc_iocbq *abtsiocbq;
11347 struct lpfc_nodelist *ndlp;
11348 struct lpfc_iocbq *iocbq;
11350 int sum, i, ret_val;
11351 unsigned long iflags;
11352 struct lpfc_sli_ring *pring_s4;
11354 spin_lock_irqsave(&phba->hbalock, iflags);
11356 /* all I/Os are in the process of being flushed */
11357 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11358 spin_unlock_irqrestore(&phba->hbalock, iflags);
11363 for (i = 1; i <= phba->sli.last_iotag; i++) {
11364 iocbq = phba->sli.iocbq_lookup[i];
11366 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11371 * If the iocbq is already being aborted, don't take a second
11372 * action, but do count it.
11374 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11377 /* issue ABTS for this IOCB based on iotag */
11378 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11379 if (abtsiocbq == NULL)
11382 icmd = &iocbq->iocb;
11383 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11384 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11385 if (phba->sli_rev == LPFC_SLI_REV4)
11386 abtsiocbq->iocb.un.acxri.abortIoTag =
11387 iocbq->sli4_xritag;
11389 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11390 abtsiocbq->iocb.ulpLe = 1;
11391 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11392 abtsiocbq->vport = vport;
11394 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11395 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11396 if (iocbq->iocb_flag & LPFC_IO_FCP)
11397 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11398 if (iocbq->iocb_flag & LPFC_IO_FOF)
11399 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11401 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11402 ndlp = lpfc_cmd->rdata->pnode;
11404 if (lpfc_is_link_up(phba) &&
11405 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11406 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11408 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11410 /* Setup callback routine and issue the command. */
11411 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11414 * Indicate the IO is being aborted by the driver and record
11415 * the caller's flag in the aborted IO.
11417 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11419 if (phba->sli_rev == LPFC_SLI_REV4) {
11420 pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
11423 /* Note: both hbalock and ring_lock must be held here */
11424 spin_lock(&pring_s4->ring_lock);
11425 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11427 spin_unlock(&pring_s4->ring_lock);
11429 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11434 if (ret_val == IOCB_ERROR)
11435 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11439 spin_unlock_irqrestore(&phba->hbalock, iflags);
11444 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11445 * @phba: Pointer to HBA context object.
11446 * @cmdiocbq: Pointer to command iocb.
11447 * @rspiocbq: Pointer to response iocb.
11449 * This function is the completion handler for iocbs issued using
11450 * the lpfc_sli_issue_iocb_wait function. This function is called by the
11451 * ring event handler function without any lock held. This function
11452 * can be called from both worker thread context and interrupt
11453 * context. This function can also be called from another thread which
11454 * cleans up the SLI layer objects.
11455 * This function copies the contents of the response iocb to the
11456 * response iocb memory object provided by the caller of
11457 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11458 * sleeps for the iocb completion.
11461 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11462 struct lpfc_iocbq *cmdiocbq,
11463 struct lpfc_iocbq *rspiocbq)
11465 wait_queue_head_t *pdone_q;
11466 unsigned long iflags;
11467 struct lpfc_scsi_buf *lpfc_cmd;
11469 spin_lock_irqsave(&phba->hbalock, iflags);
11470 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11473 * A time out has occurred for the iocb. If a time out
11474 * completion handler has been supplied, call it. Otherwise,
11475 * just free the iocbq.
11478 spin_unlock_irqrestore(&phba->hbalock, iflags);
11479 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11480 cmdiocbq->wait_iocb_cmpl = NULL;
11481 if (cmdiocbq->iocb_cmpl)
11482 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11484 lpfc_sli_release_iocbq(phba, cmdiocbq);
11488 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11489 if (cmdiocbq->context2 && rspiocbq)
11490 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11491 &rspiocbq->iocb, sizeof(IOCB_t));
11493 /* Set the exchange busy flag for task management commands */
11494 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11495 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11496 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11498 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11501 pdone_q = cmdiocbq->context_un.wait_queue;
11504 spin_unlock_irqrestore(&phba->hbalock, iflags);
11509 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11510 * @phba: Pointer to HBA context object.
11511 * @piocbq: Pointer to command iocb.
11512 * @flag: Flag to test.
11514 * This routine grabs the hbalock and then tests the iocb_flag to
11515 * see if the passed in flag is set.
11517 * 1 if flag is set.
11518 * 0 if flag is not set.
11521 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11522 struct lpfc_iocbq *piocbq, uint32_t flag)
11524 unsigned long iflags;
11527 spin_lock_irqsave(&phba->hbalock, iflags);
11528 ret = piocbq->iocb_flag & flag;
11529 spin_unlock_irqrestore(&phba->hbalock, iflags);
11535 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11536 * @phba: Pointer to HBA context object.
11537 * @pring: Pointer to sli ring.
11538 * @piocb: Pointer to command iocb.
11539 * @prspiocbq: Pointer to response iocb.
11540 * @timeout: Timeout in number of seconds.
11542 * This function issues the iocb to firmware and waits for the
11543 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11544 * to handle iocbs which time out. If the field is NULL, the
11545 * function shall free the iocbq structure. If more cleanup is
11546 * needed, the caller is expected to provide a completion function
11547 * that will provide the needed cleanup. If the iocb command is
11548 * not completed within timeout seconds, the function will either
11549 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11550 * completion function set in the iocb_cmpl field and then return
11551 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11552 * resources if this function returns IOCB_TIMEDOUT.
11553 * The function waits for the iocb completion using a
11554 * non-interruptible wait.
11555 * This function will sleep while waiting for iocb completion.
11556 * So, this function should not be called from any context which
11557 * does not allow sleeping. Due to the same reason, this function
11558 * cannot be called with interrupts disabled.
11559 * This function assumes that the iocb completions occur while
11560 * this function sleeps. So, this function cannot be called from
11561 * the thread which processes iocb completion for this ring.
11562 * This function clears the iocb_flag of the iocb object before
11563 * issuing the iocb and the iocb completion handler sets this
11564 * flag and wakes this thread when the iocb completes.
11565 * The contents of the response iocb will be copied to prspiocbq
11566 * by the completion handler when the command completes.
11567 * This function returns IOCB_SUCCESS when success.
11568 * This function is called with no lock held.
11571 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11572 uint32_t ring_number,
11573 struct lpfc_iocbq *piocb,
11574 struct lpfc_iocbq *prspiocbq,
11577 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11578 long timeleft, timeout_req = 0;
11579 int retval = IOCB_SUCCESS;
11581 struct lpfc_iocbq *iocb;
11583 int txcmplq_cnt = 0;
11584 struct lpfc_sli_ring *pring;
11585 unsigned long iflags;
11586 bool iocb_completed = true;
11588 if (phba->sli_rev >= LPFC_SLI_REV4)
11589 pring = lpfc_sli4_calc_ring(phba, piocb);
11591 pring = &phba->sli.sli3_ring[ring_number];
11593 * If the caller has provided a response iocbq buffer, then context2
11594 * must be NULL or it is an error.
11597 if (piocb->context2)
11599 piocb->context2 = prspiocbq;
11602 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11603 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11604 piocb->context_un.wait_queue = &done_q;
11605 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11607 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11608 if (lpfc_readl(phba->HCregaddr, &creg_val))
11610 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11611 writel(creg_val, phba->HCregaddr);
11612 readl(phba->HCregaddr); /* flush */
11615 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11616 SLI_IOCB_RET_IOCB);
11617 if (retval == IOCB_SUCCESS) {
11618 timeout_req = msecs_to_jiffies(timeout * 1000);
11619 timeleft = wait_event_timeout(done_q,
11620 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11622 spin_lock_irqsave(&phba->hbalock, iflags);
11623 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11626 * IOCB timed out. Inform the wake iocb wait
11627 * completion function and set local status
11630 iocb_completed = false;
11631 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11633 spin_unlock_irqrestore(&phba->hbalock, iflags);
11634 if (iocb_completed) {
11635 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11636 "0331 IOCB wake signaled\n");
11637 /* Note: we are not indicating if the IOCB has a success
11638 * status or not - that's for the caller to check.
11639 * IOCB_SUCCESS means just that the command was sent and
11640 * completed. Not that it completed successfully.
11641 */
11642 } else if (timeleft == 0) {
11643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11644 "0338 IOCB wait timeout error - no "
11645 "wake response Data x%x\n", timeout);
11646 retval = IOCB_TIMEDOUT;
11648 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11649 "0330 IOCB wake NOT set, "
11651 timeout, (timeleft / jiffies));
11652 retval = IOCB_TIMEDOUT;
11654 } else if (retval == IOCB_BUSY) {
11655 if (phba->cfg_log_verbose & LOG_SLI) {
11656 list_for_each_entry(iocb, &pring->txq, list) {
11657 txq_cnt++;
11658 }
11659 list_for_each_entry(iocb, &pring->txcmplq, list) {
11660 txcmplq_cnt++;
11661 }
11662 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11663 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11664 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11665 }
11667 } else {
11668 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11669 "0332 IOCB wait issue failed, Data x%x\n",
11670 retval);
11671 retval = IOCB_ERROR;
11672 }
11674 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11675 if (lpfc_readl(phba->HCregaddr, &creg_val))
11676 return IOCB_TIMEDOUT;
11677 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11678 writel(creg_val, phba->HCregaddr);
11679 readl(phba->HCregaddr); /* flush */
11680 }
11682 if (prspiocbq)
11683 piocb->context2 = NULL;
11685 piocb->context_un.wait_queue = NULL;
11686 piocb->iocb_cmpl = NULL;
11687 return retval;
11688 }
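/*
 * Example (not part of the driver source): a minimal caller sketch for
 * lpfc_sli_issue_iocb_wait() illustrating the ownership rule documented
 * above -- on IOCB_TIMEDOUT the completion handler still owns the iocb,
 * so the caller must not free it. The function name and the 30 second
 * timeout below are hypothetical.
 */
#if 0	/* illustration only */
static int example_send_els_and_wait(struct lpfc_hba *phba,
				     struct lpfc_iocbq *cmdiocb,
				     struct lpfc_iocbq *rspiocb)
{
	int rc;

	/* Sleeps up to 30 seconds; must not be called in atomic context */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
				      rspiocb, 30);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* do NOT release cmdiocb here */
	if (rc != IOCB_SUCCESS) {
		lpfc_sli_release_iocbq(phba, cmdiocb);
		return -EIO;
	}
	/* IOCB_SUCCESS only means "sent and completed": check the
	 * completion status in rspiocb->iocb.ulpStatus for the result.
	 */
	return 0;
}
#endif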
11691 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11692 * @phba: Pointer to HBA context object.
11693 * @pmboxq: Pointer to driver mailbox object.
11694 * @timeout: Timeout in number of seconds.
11696 * This function issues the mailbox to firmware and waits for the
11697 * mailbox command to complete. If the mailbox command is not
11698 * completed within timeout seconds, it returns MBX_TIMEOUT.
11699 * The function waits for the mailbox completion using a
11700 * non-interruptible wait (the underlying wait_for_completion_timeout()
11701 * cannot be woken by a signal). The caller
11702 * should not free the mailbox resources if this function returns
11703 * MBX_TIMEOUT.
11704 * This function will sleep while waiting for mailbox completion.
11705 * So, this function should not be called from any context which
11706 * does not allow sleeping. Due to the same reason, this function
11707 * cannot be called with interrupt disabled.
11708 * This function assumes that the mailbox completion occurs while
11709 * this function sleeps. So, this function cannot be called from
11710 * the worker thread which processes mailbox completions.
11711 * This function is called in the context of HBA management
11713 * This function returns MBX_SUCCESS when successful.
11714 * This function is called with no lock held.
11717 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11720 struct completion mbox_done;
11722 unsigned long flag;
11724 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11725 /* setup wake routine as the mailbox callback */
11726 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11728 /* setup context3 field to pass the completion pointer to the wake function */
11729 init_completion(&mbox_done);
11730 pmboxq->context3 = &mbox_done;
11731 /* now issue the command */
11732 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
11733 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
11734 wait_for_completion_timeout(&mbox_done,
11735 msecs_to_jiffies(timeout * 1000));
11737 spin_lock_irqsave(&phba->hbalock, flag);
11738 pmboxq->context3 = NULL;
11740 * if LPFC_MBX_WAKE flag is set the mailbox is completed
11741 * else do not free the resources.
11743 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
11744 retval = MBX_SUCCESS;
11745 } else {
11746 retval = MBX_TIMEOUT;
11747 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11748 }
11749 spin_unlock_irqrestore(&phba->hbalock, flag);
11750 return retval;
11751 }
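/*
 * Example (not part of the driver source): the synchronous mailbox
 * pattern built on lpfc_sli_issue_mbox_wait(). READ_REV and the 30
 * second timeout are illustrative; the MBX_TIMEOUT branch shows the
 * ownership rule from the comment above.
 */
#if 0	/* illustration only */
static int example_read_rev_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_read_rev(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, 30);
	if (rc == MBX_TIMEOUT)
		/* default completion handler now owns mboxq; don't free */
		return -ETIMEDOUT;
	/* mailbox completed (successfully or not); the caller frees it */
	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif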
11755 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
11756 * @phba: Pointer to HBA context.
11758 * This function is called to shutdown the driver's mailbox sub-system.
11759 * It first marks the mailbox sub-system as blocked to prevent
11760 * asynchronous mailbox commands from being issued off the pending mailbox
11761 * command queue. If the mailbox command sub-system shutdown is due to
11762 * HBA error conditions such as EEH or ERATT, this routine shall invoke
11763 * the mailbox sub-system flush routine to forcefully bring down the
11764 * mailbox sub-system. Otherwise, if it is due to normal condition (such
11765 * as with offline or HBA function reset), this routine will wait for the
11766 * outstanding mailbox command to complete before invoking the mailbox
11767 * sub-system flush routine to gracefully bring down mailbox sub-system.
11770 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11772 struct lpfc_sli *psli = &phba->sli;
11773 unsigned long timeout;
11775 if (mbx_action == LPFC_MBX_NO_WAIT) {
11776 /* delay 100ms for port state */
11777 msleep(100);
11778 lpfc_sli_mbox_sys_flush(phba);
11779 return;
11780 }
11781 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
11783 spin_lock_irq(&phba->hbalock);
11784 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11786 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
11787 /* Determine how long we might wait for the active mailbox
11788 * command to be gracefully completed by firmware.
11790 if (phba->sli.mbox_active)
11791 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11792 phba->sli.mbox_active) *
11793 1000) + jiffies;
11794 spin_unlock_irq(&phba->hbalock);
11796 while (phba->sli.mbox_active) {
11797 /* Check active mailbox complete status every 2ms */
11798 msleep(2);
11799 if (time_after(jiffies, timeout))
11800 /* Timeout, let the mailbox flush routine
11801 * forcefully release the active mailbox command
11802 */
11803 break;
11804 }
11805 } else
11806 spin_unlock_irq(&phba->hbalock);
11808 lpfc_sli_mbox_sys_flush(phba);
11809 }
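/*
 * Example (not part of the driver source): the jiffies-deadline polling
 * pattern used by the shutdown routine above, reduced to its essentials.
 * cond() is a hypothetical predicate; the real code polls
 * phba->sli.mbox_active.
 */
#if 0	/* illustration only */
static bool example_poll_until(bool (*cond)(void), unsigned int ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	while (!cond()) {
		msleep(2);		/* re-check every 2ms */
		if (time_after(jiffies, deadline))
			return false;	/* timed out */
	}
	return true;
}
#endif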
11812 * lpfc_sli_eratt_read - read sli-3 error attention events
11813 * @phba: Pointer to HBA context.
11815 * This function is called to read the SLI3 device error attention registers
11816 * for possible error attention events. The caller must hold the hostlock
11817 * with spin_lock_irq().
11819 * This function returns 1 when there is Error Attention in the Host Attention
11820 * Register and returns 0 otherwise.
11823 lpfc_sli_eratt_read(struct lpfc_hba *phba)
11827 /* Read chip Host Attention (HA) register */
11828 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11829 goto unplug_err;
11831 if (ha_copy & HA_ERATT) {
11832 /* Read host status register to retrieve error event */
11833 if (lpfc_sli_read_hs(phba))
11834 goto unplug_err;
11836 /* Check if a deferred error condition is active */
11837 if ((HS_FFER1 & phba->work_hs) &&
11838 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11839 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
11840 phba->hba_flag |= DEFER_ERATT;
11841 /* Clear all interrupt enable conditions */
11842 writel(0, phba->HCregaddr);
11843 readl(phba->HCregaddr);
11846 /* Set the driver HA work bitmap */
11847 phba->work_ha |= HA_ERATT;
11848 /* Indicate polling handles this ERATT */
11849 phba->hba_flag |= HBA_ERATT_HANDLED;
11850 return 1;
11851 }
11852 return 0;
11854 unplug_err:
11855 /* Set the driver HS work bitmap */
11856 phba->work_hs |= UNPLUG_ERR;
11857 /* Set the driver HA work bitmap */
11858 phba->work_ha |= HA_ERATT;
11859 /* Indicate polling handles this ERATT */
11860 phba->hba_flag |= HBA_ERATT_HANDLED;
11861 return 1;
11862 }
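/*
 * Example (not part of the driver source): the deferred error attention
 * test used above, in isolation. A deferred ERATT is signalled by
 * HS_FFER1 together with at least one of the other FFER bits; the
 * helper name is hypothetical.
 */
#if 0	/* illustration only */
static bool example_is_deferred_eratt(uint32_t work_hs)
{
	uint32_t others = HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			  HS_FFER6 | HS_FFER7 | HS_FFER8;

	return (work_hs & HS_FFER1) && (work_hs & others);
}
#endif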
11865 * lpfc_sli4_eratt_read - read sli-4 error attention events
11866 * @phba: Pointer to HBA context.
11868 * This function is called to read the SLI4 device error attention registers
11869 * for possible error attention events. The caller must hold the hostlock
11870 * with spin_lock_irq().
11872 * This function returns 1 when there is Error Attention in the Host Attention
11873 * Register and returns 0 otherwise.
11876 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11878 uint32_t uerr_sta_hi, uerr_sta_lo;
11879 uint32_t if_type, portsmphr;
11880 struct lpfc_register portstat_reg;
11883 * For now, use the SLI4 device internal unrecoverable error
11884 * registers for error attention. This can be changed later.
11885 */
11886 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11887 switch (if_type) {
11888 case LPFC_SLI_INTF_IF_TYPE_0:
11889 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11890 &uerr_sta_lo) ||
11891 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11892 &uerr_sta_hi)) {
11893 phba->work_hs |= UNPLUG_ERR;
11894 phba->work_ha |= HA_ERATT;
11895 phba->hba_flag |= HBA_ERATT_HANDLED;
11898 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11899 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11901 "1423 HBA Unrecoverable error: "
11902 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11903 "ue_mask_lo_reg=0x%x, "
11904 "ue_mask_hi_reg=0x%x\n",
11905 uerr_sta_lo, uerr_sta_hi,
11906 phba->sli4_hba.ue_mask_lo,
11907 phba->sli4_hba.ue_mask_hi);
11908 phba->work_status[0] = uerr_sta_lo;
11909 phba->work_status[1] = uerr_sta_hi;
11910 phba->work_ha |= HA_ERATT;
11911 phba->hba_flag |= HBA_ERATT_HANDLED;
11915 case LPFC_SLI_INTF_IF_TYPE_2:
11916 case LPFC_SLI_INTF_IF_TYPE_6:
11917 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11918 &portstat_reg.word0) ||
11919 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11920 &portsmphr)) {
11921 phba->work_hs |= UNPLUG_ERR;
11922 phba->work_ha |= HA_ERATT;
11923 phba->hba_flag |= HBA_ERATT_HANDLED;
11926 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11927 phba->work_status[0] =
11928 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11929 phba->work_status[1] =
11930 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11932 "2885 Port Status Event: "
11933 "port status reg 0x%x, "
11934 "port smphr reg 0x%x, "
11935 "error 1=0x%x, error 2=0x%x\n",
11936 portstat_reg.word0,
11937 portsmphr,
11938 phba->work_status[0],
11939 phba->work_status[1]);
11940 phba->work_ha |= HA_ERATT;
11941 phba->hba_flag |= HBA_ERATT_HANDLED;
11945 case LPFC_SLI_INTF_IF_TYPE_1:
11946 default:
11947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11948 "2886 HBA Error Attention on unsupported "
11949 "if type %d.", if_type);
11957 * lpfc_sli_check_eratt - check error attention events
11958 * @phba: Pointer to HBA context.
11960 * This function is called from timer soft interrupt context to check HBA's
11961 * error attention register bit for error attention events.
11963 * This function returns 1 when there is Error Attention in the Host Attention
11964 * Register and returns 0 otherwise.
11967 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11971 /* If somebody is waiting to handle an eratt, don't process it
11972 * here. The brdkill function will do this.
11973 */
11974 if (phba->link_flag & LS_IGNORE_ERATT)
11975 return 0;
11977 /* Check if interrupt handler handles this ERATT */
11978 spin_lock_irq(&phba->hbalock);
11979 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11980 /* Interrupt handler has handled ERATT */
11981 spin_unlock_irq(&phba->hbalock);
11985 /*
11986 * If there is deferred error attention, do not check for error
11987 * attention.
11988 */
11989 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11990 spin_unlock_irq(&phba->hbalock);
11994 /* If PCI channel is offline, don't process it */
11995 if (unlikely(pci_channel_offline(phba->pcidev))) {
11996 spin_unlock_irq(&phba->hbalock);
12000 switch (phba->sli_rev) {
12001 case LPFC_SLI_REV2:
12002 case LPFC_SLI_REV3:
12003 /* Read chip Host Attention (HA) register */
12004 ha_copy = lpfc_sli_eratt_read(phba);
12005 break;
12006 case LPFC_SLI_REV4:
12007 /* Read device Unrecoverable Error (UERR) registers */
12008 ha_copy = lpfc_sli4_eratt_read(phba);
12009 break;
12010 default:
12011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12012 "0299 Invalid SLI revision (%d)\n",
12017 spin_unlock_irq(&phba->hbalock);
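/*
 * Example (not part of the driver source): how a timer callback can use
 * lpfc_sli_check_eratt(). In the real driver the eratt_poll timer and
 * its handler live elsewhere; this sketch only shows the shape of a
 * softirq-safe caller.
 */
#if 0	/* illustration only */
static void example_eratt_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

	/* No sleeping here: lpfc_sli_check_eratt() is softirq-safe */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);	/* hand off to the worker */
}
#endif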
12023 * lpfc_intr_state_check - Check device state for interrupt handling
12024 * @phba: Pointer to HBA context.
12026 * This inline routine checks whether a device or its PCI slot is in a state
12027 * that the interrupt should be handled.
12029 * This function returns 0 if the device or the PCI slot is in a state that
12030 * interrupt should be handled, otherwise -EIO.
12033 lpfc_intr_state_check(struct lpfc_hba *phba)
12035 /* If the pci channel is offline, ignore all the interrupts */
12036 if (unlikely(pci_channel_offline(phba->pcidev)))
12037 return -EIO;
12039 /* Update device level interrupt statistics */
12040 phba->sli.slistat.sli_intr++;
12042 /* Ignore all interrupts during initialization. */
12043 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12044 return -EIO;
12046 return 0;
12047 }
12050 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12051 * @irq: Interrupt number.
12052 * @dev_id: The device context pointer.
12054 * This function is directly called from the PCI layer as an interrupt
12055 * service routine when device with SLI-3 interface spec is enabled with
12056 * MSI-X multi-message interrupt mode and there are slow-path events in
12057 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12058 * interrupt mode, this function is called as part of the device-level
12059 * interrupt handler. When the PCI slot is in error recovery or the HBA
12060 * is undergoing initialization, the interrupt handler will not process
12061 * the interrupt. The link attention and ELS ring attention events are
12062 * handled by the worker thread. The interrupt handler signals the worker
12063 * thread and returns for these events. This function is called without
12064 * any lock held. It gets the hbalock to access and update SLI data
12065 * structures.
12067 * This function returns IRQ_HANDLED when interrupt is handled else it
12068 * returns IRQ_NONE.
12071 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12073 struct lpfc_hba *phba;
12074 uint32_t ha_copy, hc_copy;
12075 uint32_t work_ha_copy;
12076 unsigned long status;
12077 unsigned long iflag;
12080 MAILBOX_t *mbox, *pmbox;
12081 struct lpfc_vport *vport;
12082 struct lpfc_nodelist *ndlp;
12083 struct lpfc_dmabuf *mp;
12088 * Get the driver's phba structure from the dev_id and
12089 * assume the HBA is not interrupting.
12091 phba = (struct lpfc_hba *)dev_id;
12093 if (unlikely(!phba))
12094 return IRQ_NONE;
12096 /*
12097 * Stuff needs to be attended to when this function is invoked as an
12098 * individual interrupt handler in MSI-X multi-message interrupt mode
12099 */
12098 * individual interrupt handler in MSI-X multi-message interrupt mode
12100 if (phba->intr_type == MSIX) {
12101 /* Check device state for handling interrupt */
12102 if (lpfc_intr_state_check(phba))
12103 return IRQ_NONE;
12104 /* Need to read HA REG for slow-path events */
12105 spin_lock_irqsave(&phba->hbalock, iflag);
12106 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12107 goto unplug_error;
12108 /* If somebody is waiting to handle an eratt don't process it
12109 * here. The brdkill function will do this.
12111 if (phba->link_flag & LS_IGNORE_ERATT)
12112 ha_copy &= ~HA_ERATT;
12113 /* Check the need for handling ERATT in interrupt handler */
12114 if (ha_copy & HA_ERATT) {
12115 if (phba->hba_flag & HBA_ERATT_HANDLED)
12116 /* ERATT polling has handled ERATT */
12117 ha_copy &= ~HA_ERATT;
12118 else
12119 /* Indicate interrupt handler handles ERATT */
12120 phba->hba_flag |= HBA_ERATT_HANDLED;
12122 }
12123 /*
12124 * If there is deferred error attention, do not check for any
12125 * interrupt.
12126 */
12127 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12128 spin_unlock_irqrestore(&phba->hbalock, iflag);
12132 /* Clear up only attention source related to slow-path */
12133 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12134 goto unplug_error;
12136 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12137 HC_LAINT_ENA | HC_ERINT_ENA),
12138 phba->HCregaddr);
12139 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12140 phba->HAregaddr);
12141 writel(hc_copy, phba->HCregaddr);
12142 readl(phba->HAregaddr); /* flush */
12143 spin_unlock_irqrestore(&phba->hbalock, iflag);
12144 } else
12145 ha_copy = phba->ha_copy;
12147 work_ha_copy = ha_copy & phba->work_ha_mask;
12149 if (work_ha_copy) {
12150 if (work_ha_copy & HA_LATT) {
12151 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12153 * Turn off Link Attention interrupts
12154 * until CLEAR_LA done
12156 spin_lock_irqsave(&phba->hbalock, iflag);
12157 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12158 if (lpfc_readl(phba->HCregaddr, &control))
12159 goto unplug_error;
12160 control &= ~HC_LAINT_ENA;
12161 writel(control, phba->HCregaddr);
12162 readl(phba->HCregaddr); /* flush */
12163 spin_unlock_irqrestore(&phba->hbalock, iflag);
12164 }
12165 else
12166 work_ha_copy &= ~HA_LATT;
12167 }
12169 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12171 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12172 * the only slow ring.
12174 status = (work_ha_copy &
12175 (HA_RXMASK << (4*LPFC_ELS_RING)));
12176 status >>= (4*LPFC_ELS_RING);
12177 if (status & HA_RXMASK) {
12178 spin_lock_irqsave(&phba->hbalock, iflag);
12179 if (lpfc_readl(phba->HCregaddr, &control))
12180 goto unplug_error;
12182 lpfc_debugfs_slow_ring_trc(phba,
12183 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12184 control, status,
12185 (uint32_t)phba->sli.slistat.sli_intr);
12187 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12188 lpfc_debugfs_slow_ring_trc(phba,
12189 "ISR Disable ring:"
12190 "pwork:x%x hawork:x%x wait:x%x",
12191 phba->work_ha, work_ha_copy,
12192 (uint32_t)((unsigned long)
12193 &phba->work_waitq));
12195 control &=
12196 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12197 writel(control, phba->HCregaddr);
12198 readl(phba->HCregaddr); /* flush */
12199 }
12200 else {
12201 lpfc_debugfs_slow_ring_trc(phba,
12202 "ISR slow ring: pwork:"
12203 "x%x hawork:x%x wait:x%x",
12204 phba->work_ha, work_ha_copy,
12205 (uint32_t)((unsigned long)
12206 &phba->work_waitq));
12208 spin_unlock_irqrestore(&phba->hbalock, iflag);
12211 spin_lock_irqsave(&phba->hbalock, iflag);
12212 if (work_ha_copy & HA_ERATT) {
12213 if (lpfc_sli_read_hs(phba))
12214 goto unplug_error;
12215 /*
12216 * Check if a deferred error condition
12217 * is active
12218 */
12219 if ((HS_FFER1 & phba->work_hs) &&
12220 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12221 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12222 phba->work_hs)) {
12223 phba->hba_flag |= DEFER_ERATT;
12224 /* Clear all interrupt enable conditions */
12225 writel(0, phba->HCregaddr);
12226 readl(phba->HCregaddr);
12230 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12231 pmb = phba->sli.mbox_active;
12232 pmbox = &pmb->u.mb;
12233 mbox = phba->mbox;
12234 vport = pmb->vport;
12236 /* First check out the status word */
12237 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12238 if (pmbox->mbxOwner != OWN_HOST) {
12239 spin_unlock_irqrestore(&phba->hbalock, iflag);
12241 * Stray Mailbox Interrupt, mbxCommand <cmd>
12242 * mbxStatus <status>
12244 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12246 "(%d):0304 Stray Mailbox "
12247 "Interrupt mbxCommand x%x "
12248 "mbxStatus x%x\n",
12249 (vport ? vport->vpi : 0),
12250 pmbox->mbxCommand,
12251 pmbox->mbxStatus);
12252 /* clear mailbox attention bit */
12253 work_ha_copy &= ~HA_MBATT;
12254 } else {
12255 phba->sli.mbox_active = NULL;
12256 spin_unlock_irqrestore(&phba->hbalock, iflag);
12257 phba->last_completion_time = jiffies;
12258 del_timer(&phba->sli.mbox_tmo);
12259 if (pmb->mbox_cmpl) {
12260 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12261 MAILBOX_CMD_SIZE);
12262 if (pmb->out_ext_byte_len &&
12263 pmb->context2)
12264 lpfc_sli_pcimem_bcopy(
12265 phba->mbox_ext,
12266 pmb->context2,
12267 pmb->out_ext_byte_len);
12268 }
12269 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12270 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12272 lpfc_debugfs_disc_trc(vport,
12273 LPFC_DISC_TRC_MBOX_VPORT,
12274 "MBOX dflt rpi: : "
12275 "status:x%x rpi:x%x",
12276 (uint32_t)pmbox->mbxStatus,
12277 pmbox->un.varWords[0], 0);
12279 if (!pmbox->mbxStatus) {
12280 mp = (struct lpfc_dmabuf *)
12282 ndlp = (struct lpfc_nodelist *)
12285 /* Reg_LOGIN of dflt RPI was
12286 * successful. Now let's get
12287 * rid of the RPI using the
12288 * same mbox buffer.
12290 lpfc_unreg_login(phba,
12291 vport->vpi,
12292 pmbox->un.varWords[0],
12293 pmb);
12294 pmb->mbox_cmpl =
12295 lpfc_mbx_cmpl_dflt_rpi;
12296 pmb->context1 = mp;
12297 pmb->context2 = ndlp;
12298 pmb->vport = vport;
12299 rc = lpfc_sli_issue_mbox(phba,
12300 pmb,
12301 MBX_NOWAIT);
12302 if (rc != MBX_BUSY)
12303 lpfc_printf_log(phba,
12305 LOG_MBOX | LOG_SLI,
12306 "0350 rc should have"
12307 "been MBX_BUSY\n");
12308 if (rc != MBX_NOT_FINISHED)
12309 goto send_current_mbox;
12313 &phba->pport->work_port_lock,
12315 phba->pport->work_port_events &=
12317 spin_unlock_irqrestore(
12318 &phba->pport->work_port_lock,
12320 lpfc_mbox_cmpl_put(phba, pmb);
12323 spin_unlock_irqrestore(&phba->hbalock, iflag);
12324 send_current_mbox:
12325 if ((work_ha_copy & HA_MBATT) &&
12326 (phba->sli.mbox_active == NULL)) {
12328 /* Process next mailbox command if there is one */
12329 do {
12330 rc = lpfc_sli_issue_mbox(phba, NULL,
12331 MBX_NOWAIT);
12332 } while (rc == MBX_NOT_FINISHED);
12333 if (rc != MBX_SUCCESS)
12334 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12335 LOG_SLI, "0349 rc should be "
12339 spin_lock_irqsave(&phba->hbalock, iflag);
12340 phba->work_ha |= work_ha_copy;
12341 spin_unlock_irqrestore(&phba->hbalock, iflag);
12342 lpfc_worker_wake_up(phba);
12344 return IRQ_HANDLED;
12345 unplug_error:
12346 spin_unlock_irqrestore(&phba->hbalock, iflag);
12347 return IRQ_HANDLED;
12349 } /* lpfc_sli_sp_intr_handler */
12352 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12353 * @irq: Interrupt number.
12354 * @dev_id: The device context pointer.
12356 * This function is directly called from the PCI layer as an interrupt
12357 * service routine when device with SLI-3 interface spec is enabled with
12358 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12359 * ring event in the HBA. However, when the device is enabled with either
12360 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12361 * device-level interrupt handler. When the PCI slot is in error recovery
12362 * or the HBA is undergoing initialization, the interrupt handler will not
12363 * process the interrupt. The SCSI FCP fast-path ring events are handled
12364 * in the interrupt context. This function is called without any lock held.
12365 * It gets the hbalock to access and update SLI data structures.
12367 * This function returns IRQ_HANDLED when interrupt is handled else it
12368 * returns IRQ_NONE.
12371 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12373 struct lpfc_hba *phba;
12375 unsigned long status;
12376 unsigned long iflag;
12377 struct lpfc_sli_ring *pring;
12379 /* Get the driver's phba structure from the dev_id and
12380 * assume the HBA is not interrupting.
12382 phba = (struct lpfc_hba *) dev_id;
12384 if (unlikely(!phba))
12385 return IRQ_NONE;
12387 /*
12388 * Stuff needs to be attended to when this function is invoked as an
12389 * individual interrupt handler in MSI-X multi-message interrupt mode
12390 */
12391 if (phba->intr_type == MSIX) {
12392 /* Check device state for handling interrupt */
12393 if (lpfc_intr_state_check(phba))
12394 return IRQ_NONE;
12395 /* Need to read HA REG for FCP ring and other ring events */
12396 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12397 return IRQ_HANDLED;
12398 /* Clear up only attention source related to fast-path */
12399 spin_lock_irqsave(&phba->hbalock, iflag);
12400 /*
12401 * If there is deferred error attention, do not check for
12402 * any interrupt.
12403 */
12404 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12405 spin_unlock_irqrestore(&phba->hbalock, iflag);
12408 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12409 phba->HAregaddr);
12410 readl(phba->HAregaddr); /* flush */
12411 spin_unlock_irqrestore(&phba->hbalock, iflag);
12412 } else
12413 ha_copy = phba->ha_copy;
12416 * Process all events on FCP ring. Take the optimized path for FCP IO.
12418 ha_copy &= ~(phba->work_ha_mask);
12420 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12421 status >>= (4*LPFC_FCP_RING);
12422 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12423 if (status & HA_RXMASK)
12424 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12426 if (phba->cfg_multi_ring_support == 2) {
12428 * Process all events on extra ring. Take the optimized path
12429 * for extra ring IO.
12431 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12432 status >>= (4*LPFC_EXTRA_RING);
12433 if (status & HA_RXMASK) {
12434 lpfc_sli_handle_fast_ring_event(phba,
12435 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12436 status);
12437 }
12438 }
12439 return IRQ_HANDLED;
12440 } /* lpfc_sli_fp_intr_handler */
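/*
 * Example (not part of the driver source): the Host Attention layout the
 * shift arithmetic above relies on. Each ring owns a 4-bit nibble in the
 * HA register, so a ring's RX attention bits are isolated with
 * HA_RXMASK << (4 * ring). The helper name is hypothetical.
 */
#if 0	/* illustration only */
static bool example_ring_has_rx_attention(uint32_t ha_copy, int ring)
{
	unsigned long status = ha_copy & (HA_RXMASK << (4 * ring));

	return (status >> (4 * ring)) & HA_RXMASK;
}
#endif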
12443 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12444 * @irq: Interrupt number.
12445 * @dev_id: The device context pointer.
12447 * This function is the HBA device-level interrupt handler to device with
12448 * SLI-3 interface spec, called from the PCI layer when either MSI or
12449 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12450 * requires driver attention. This function invokes the slow-path interrupt
12451 * attention handling function and fast-path interrupt attention handling
12452 * function in turn to process the relevant HBA attention events. This
12453 * function is called without any lock held. It gets the hbalock to access
12454 * and update SLI data structures.
12456 * This function returns IRQ_HANDLED when interrupt is handled, else it
12457 * returns IRQ_NONE.
12460 lpfc_sli_intr_handler(int irq, void *dev_id)
12462 struct lpfc_hba *phba;
12463 irqreturn_t sp_irq_rc, fp_irq_rc;
12464 unsigned long status1, status2;
12468 * Get the driver's phba structure from the dev_id and
12469 * assume the HBA is not interrupting.
12471 phba = (struct lpfc_hba *) dev_id;
12473 if (unlikely(!phba))
12474 return IRQ_NONE;
12476 /* Check device state for handling interrupt */
12477 if (lpfc_intr_state_check(phba))
12478 return IRQ_NONE;
12480 spin_lock(&phba->hbalock);
12481 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12482 spin_unlock(&phba->hbalock);
12483 return IRQ_HANDLED;
12486 if (unlikely(!phba->ha_copy)) {
12487 spin_unlock(&phba->hbalock);
12488 return IRQ_NONE;
12489 } else if (phba->ha_copy & HA_ERATT) {
12490 if (phba->hba_flag & HBA_ERATT_HANDLED)
12491 /* ERATT polling has handled ERATT */
12492 phba->ha_copy &= ~HA_ERATT;
12493 else
12494 /* Indicate interrupt handler handles ERATT */
12495 phba->hba_flag |= HBA_ERATT_HANDLED;
12497 }
12498 /*
12499 * If there is deferred error attention, do not check for any interrupt.
12500 */
12501 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12502 spin_unlock(&phba->hbalock);
12506 /* Clear attention sources except link and error attentions */
12507 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12508 spin_unlock(&phba->hbalock);
12509 return IRQ_HANDLED;
12511 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12512 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12513 phba->HCregaddr);
12514 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12515 writel(hc_copy, phba->HCregaddr);
12516 readl(phba->HAregaddr); /* flush */
12517 spin_unlock(&phba->hbalock);
12520 * Invokes slow-path host attention interrupt handling as appropriate.
12523 /* status of events with mailbox and link attention */
12524 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12526 /* status of events with ELS ring */
12527 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12528 status2 >>= (4*LPFC_ELS_RING);
12530 if (status1 || (status2 & HA_RXMASK))
12531 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12533 sp_irq_rc = IRQ_NONE;
12536 * Invoke fast-path host attention interrupt handling as appropriate.
12539 /* status of events with FCP ring */
12540 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12541 status1 >>= (4*LPFC_FCP_RING);
12543 /* status of events with extra ring */
12544 if (phba->cfg_multi_ring_support == 2) {
12545 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12546 status2 >>= (4*LPFC_EXTRA_RING);
12547 } else
12548 status2 = 0;
12550 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12551 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12553 fp_irq_rc = IRQ_NONE;
12555 /* Return device-level interrupt handling status */
12556 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12557 } /* lpfc_sli_intr_handler */
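/*
 * Example (not part of the driver source): how this device-level handler
 * is typically registered for INTx/MSI. The real registration is done
 * during driver setup; this sketch only shows the request_irq() shape.
 * On a shared line the handler must return IRQ_NONE when the HBA did
 * not interrupt, exactly as lpfc_sli_intr_handler does.
 */
#if 0	/* illustration only */
static int example_enable_intx(struct lpfc_hba *phba)
{
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}
#endif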
12560 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12561 * @phba: pointer to lpfc hba data structure.
12563 * This routine is invoked by the worker thread to process all the pending
12564 * SLI4 FCP abort XRI events.
12566 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12568 struct lpfc_cq_event *cq_event;
12570 /* First, declare the fcp xri abort event has been handled */
12571 spin_lock_irq(&phba->hbalock);
12572 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12573 spin_unlock_irq(&phba->hbalock);
12574 /* Now, handle all the fcp xri abort events */
12575 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12576 /* Get the first event from the head of the event queue */
12577 spin_lock_irq(&phba->hbalock);
12578 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12579 cq_event, struct lpfc_cq_event, list);
12580 spin_unlock_irq(&phba->hbalock);
12581 /* Notify aborted XRI for FCP work queue */
12582 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12583 /* Free the event processed back to the free pool */
12584 lpfc_sli4_cq_event_release(phba, cq_event);
12585 }
12586 }
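/*
 * Example (not part of the driver source): the lock/remove/unlock/process
 * drain pattern used by both xri-abort routines here, as a generic sketch.
 * my_list and the helper name are hypothetical; processing happens outside
 * the lock so the handler may block.
 */
#if 0	/* illustration only */
static void example_drain_events(struct lpfc_hba *phba,
				 struct list_head *my_list)
{
	struct lpfc_cq_event *evt;

	while (!list_empty(my_list)) {
		/* hold hbalock only for the list manipulation */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(my_list, evt, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* ... handle evt here, outside the lock ... */
		lpfc_sli4_cq_event_release(phba, evt);
	}
}
#endif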
12589 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12590 * @phba: pointer to lpfc hba data structure.
12592 * This routine is invoked by the worker thread to process all the pending
12593 * SLI4 els abort xri events.
12595 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12597 struct lpfc_cq_event *cq_event;
12599 /* First, declare the els xri abort event has been handled */
12600 spin_lock_irq(&phba->hbalock);
12601 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12602 spin_unlock_irq(&phba->hbalock);
12603 /* Now, handle all the els xri abort events */
12604 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12605 /* Get the first event from the head of the event queue */
12606 spin_lock_irq(&phba->hbalock);
12607 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12608 cq_event, struct lpfc_cq_event, list);
12609 spin_unlock_irq(&phba->hbalock);
12610 /* Notify aborted XRI for ELS work queue */
12611 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12612 /* Free the event processed back to the free pool */
12613 lpfc_sli4_cq_event_release(phba, cq_event);
12618 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12619 * @phba: pointer to lpfc hba data structure
12620 * @pIocbIn: pointer to the rspiocbq
12621 * @pIocbOut: pointer to the cmdiocbq
12622 * @wcqe: pointer to the complete wcqe
12624 * This routine transfers the fields of a command iocbq to a response iocbq
12625 * by copying all the IOCB fields from command iocbq and transferring the
12626 * completion status information from the complete wcqe.
12629 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12630 struct lpfc_iocbq *pIocbIn,
12631 struct lpfc_iocbq *pIocbOut,
12632 struct lpfc_wcqe_complete *wcqe)
12635 unsigned long iflags;
12636 uint32_t status, max_response;
12637 struct lpfc_dmabuf *dmabuf;
12638 struct ulp_bde64 *bpl, bde;
12639 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12641 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12642 sizeof(struct lpfc_iocbq) - offset);
12643 /* Map WCQE parameters into irspiocb parameters */
12644 status = bf_get(lpfc_wcqe_c_status, wcqe);
12645 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12646 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12647 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12648 pIocbIn->iocb.un.fcpi.fcpi_parm =
12649 pIocbOut->iocb.un.fcpi.fcpi_parm -
12650 wcqe->total_data_placed;
12651 else
12652 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12653 else {
12654 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12655 switch (pIocbOut->iocb.ulpCommand) {
12656 case CMD_ELS_REQUEST64_CR:
12657 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12658 bpl = (struct ulp_bde64 *)dmabuf->virt;
12659 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12660 max_response = bde.tus.f.bdeSize;
12661 break;
12662 case CMD_GEN_REQUEST64_CR:
12663 max_response = 0;
12664 if (!pIocbOut->context3)
12665 break;
12666 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12667 sizeof(struct ulp_bde64);
12668 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12669 bpl = (struct ulp_bde64 *)dmabuf->virt;
12670 for (i = 0; i < numBdes; i++) {
12671 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12672 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12673 max_response += bde.tus.f.bdeSize;
12674 }
12675 break;
12676 default:
12677 max_response = wcqe->total_data_placed;
12678 break;
12679 }
12680 if (max_response < wcqe->total_data_placed)
12681 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12682 else
12683 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12684 wcqe->total_data_placed;
12685 }
12687 /* Convert BG errors for completion status */
12688 if (status == CQE_STATUS_DI_ERROR) {
12689 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12691 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12692 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12694 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12696 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12697 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12698 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12699 BGS_GUARD_ERR_MASK;
12700 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12701 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12702 BGS_APPTAG_ERR_MASK;
12703 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12704 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12705 BGS_REFTAG_ERR_MASK;
12707 /* Check to see if there was any good data before the error */
12708 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12709 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12710 BGS_HI_WATER_MARK_PRESENT_MASK;
12711 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12712 wcqe->total_data_placed;
12716 * Set ALL the error bits to indicate we don't know what
12717 * type of error it is.
12719 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12720 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12721 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12722 BGS_GUARD_ERR_MASK);
12725 /* Pick up HBA exchange busy condition */
12726 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12727 spin_lock_irqsave(&phba->hbalock, iflags);
12728 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12729 spin_unlock_irqrestore(&phba->hbalock, iflags);
12730 }
12731 }
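/*
 * Example (not part of the driver source): reading back the BlockGuard
 * status bits that lpfc_sli4_iocb_param_transfer() encodes above. The
 * helper name is hypothetical.
 */
#if 0	/* illustration only */
static void example_report_bg(struct lpfc_iocbq *rspiocb)
{
	uint32_t bgstat = rspiocb->iocb.unsli3.sli3_bg.bgstat;

	if (bgstat & BGS_GUARD_ERR_MASK)
		pr_info("example: guard tag check failed\n");
	if (bgstat & BGS_APPTAG_ERR_MASK)
		pr_info("example: application tag check failed\n");
	if (bgstat & BGS_REFTAG_ERR_MASK)
		pr_info("example: reference tag check failed\n");
	if (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK)
		pr_info("example: %u good bytes before the error\n",
			rspiocb->iocb.unsli3.sli3_bg.bghm);
}
#endif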
12734 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12735 * @phba: Pointer to HBA context object.
12736 * @wcqe: Pointer to work-queue completion queue entry.
12738 * This routine handles an ELS work-queue completion event and constructs
12739 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12740 * discovery engine to handle.
12742 * Return: Pointer to the receive IOCBQ, NULL otherwise.
12744 static struct lpfc_iocbq *
12745 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12746 struct lpfc_iocbq *irspiocbq)
12748 struct lpfc_sli_ring *pring;
12749 struct lpfc_iocbq *cmdiocbq;
12750 struct lpfc_wcqe_complete *wcqe;
12751 unsigned long iflags;
12753 pring = lpfc_phba_elsring(phba);
12754 if (unlikely(!pring))
12755 return NULL;
12757 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12758 spin_lock_irqsave(&pring->ring_lock, iflags);
12759 pring->stats.iocb_event++;
12760 /* Look up the ELS command IOCB and create pseudo response IOCB */
12761 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12762 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12763 if (unlikely(!cmdiocbq)) {
12764 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12765 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12766 "0386 ELS complete with no corresponding "
12767 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
12768 wcqe->word0, wcqe->total_data_placed,
12769 wcqe->parameter, wcqe->word3);
12770 lpfc_sli_release_iocbq(phba, irspiocbq);
12771 return NULL;
12772 }
12774 /* Put the iocb back on the txcmplq */
12775 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12776 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12778 /* Fake the irspiocbq and copy necessary response information */
12779 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12784 inline struct lpfc_cq_event *
12785 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
12787 struct lpfc_cq_event *cq_event;
12789 /* Allocate a new internal CQ_EVENT entry */
12790 cq_event = lpfc_sli4_cq_event_alloc(phba);
12791 if (!cq_event) {
12792 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12793 "0602 Failed to alloc CQ_EVENT entry\n");
12797 /* Move the CQE into the event */
12798 memcpy(&cq_event->cqe, entry, size);
12799 return cq_event;
12800 }
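/*
 * Example (not part of the driver source): typical use of
 * lpfc_cq_event_setup(), mirroring the async-event path that follows.
 * The helper name is hypothetical.
 */
#if 0	/* illustration only */
static bool example_queue_async(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *evt;
	unsigned long iflags;

	evt = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!evt)
		return false;	/* allocation failed; event is dropped */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&evt->list, &phba->sli4_hba.sp_asynce_work_queue);
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return true;	/* tell the caller to wake the worker thread */
}
#endif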
12803 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12804 * @phba: Pointer to HBA context object.
12805 * @cqe: Pointer to mailbox completion queue entry.
12807 * This routine processes a mailbox completion queue entry with an
12808 * asynchronous event.
12810 * Return: true if work posted to worker thread, otherwise false.
12813 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12815 struct lpfc_cq_event *cq_event;
12816 unsigned long iflags;
12818 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12819 "0392 Async Event: word0:x%x, word1:x%x, "
12820 "word2:x%x, word3:x%x\n", mcqe->word0,
12821 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12823 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
12824 if (!cq_event)
12825 return false;
12826 spin_lock_irqsave(&phba->hbalock, iflags);
12827 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12828 /* Set the async event flag */
12829 phba->hba_flag |= ASYNC_EVENT;
12830 spin_unlock_irqrestore(&phba->hbalock, iflags);
12836 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12837 * @phba: Pointer to HBA context object.
12838 * @cqe: Pointer to mailbox completion queue entry.
12840 * This routine processes a mailbox completion queue entry with a mailbox
12841 * completion event.
12843 * Return: true if work posted to worker thread, otherwise false.
12846 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12848 uint32_t mcqe_status;
12849 MAILBOX_t *mbox, *pmbox;
12850 struct lpfc_mqe *mqe;
12851 struct lpfc_vport *vport;
12852 struct lpfc_nodelist *ndlp;
12853 struct lpfc_dmabuf *mp;
12854 unsigned long iflags;
12856 bool workposted = false;
12859 /* If this is not a mailbox-complete MCQE, bail out via the mailbox-consumed check */
12860 if (!bf_get(lpfc_trailer_completed, mcqe))
12861 goto out_no_mqe_complete;
12863 /* Get the reference to the active mbox command */
12864 spin_lock_irqsave(&phba->hbalock, iflags);
12865 pmb = phba->sli.mbox_active;
12866 if (unlikely(!pmb)) {
12867 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12868 "1832 No pending MBOX command to handle\n");
12869 spin_unlock_irqrestore(&phba->hbalock, iflags);
12870 goto out_no_mqe_complete;
12872 spin_unlock_irqrestore(&phba->hbalock, iflags);
12873 mqe = &pmb->u.mqe;
12874 pmbox = (MAILBOX_t *)&pmb->u.mqe;
12875 mbox = phba->mbox;
12876 vport = pmb->vport;
12878 /* Reset heartbeat timer */
12879 phba->last_completion_time = jiffies;
12880 del_timer(&phba->sli.mbox_tmo);
12882 /* Move mbox data to caller's mailbox region, do endian swapping */
12883 if (pmb->mbox_cmpl && mbox)
12884 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12887 * For mcqe errors, conditionally move a modified error code to
12888 * the mbox so that the error will not be missed.
12890 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12891 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12892 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12893 bf_set(lpfc_mqe_status, mqe,
12894 (LPFC_MBX_ERROR_RANGE | mcqe_status));
12896 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12897 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12898 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12899 "MBOX dflt rpi: status:x%x rpi:x%x",
12900 mcqe_status,
12901 pmbox->un.varWords[0], 0);
12902 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12903 mp = (struct lpfc_dmabuf *)(pmb->context1);
12904 ndlp = (struct lpfc_nodelist *)pmb->context2;
12905 /* Reg_LOGIN of dflt RPI was successful. Now let's get
12906 * rid of the RPI using the same mbox buffer.
12907 */
12908 lpfc_unreg_login(phba, vport->vpi,
12909 pmbox->un.varWords[0], pmb);
12910 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12911 pmb->context1 = mp;
12912 pmb->context2 = ndlp;
12913 pmb->vport = vport;
12914 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12915 if (rc != MBX_BUSY)
12916 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12917 LOG_SLI, "0385 rc should "
12918 "have been MBX_BUSY\n");
12919 if (rc != MBX_NOT_FINISHED)
12920 goto send_current_mbox;
12923 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12924 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12925 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12927 /* There is mailbox completion work to do */
12928 spin_lock_irqsave(&phba->hbalock, iflags);
12929 __lpfc_mbox_cmpl_put(phba, pmb);
12930 phba->work_ha |= HA_MBATT;
12931 spin_unlock_irqrestore(&phba->hbalock, iflags);
12932 workposted = true;
12934 send_current_mbox:
12935 spin_lock_irqsave(&phba->hbalock, iflags);
12936 /* Release the mailbox command posting token */
12937 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12938 /* Setting active mailbox pointer need to be in sync to flag clear */
12939 phba->sli.mbox_active = NULL;
12940 if (bf_get(lpfc_trailer_consumed, mcqe))
12941 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12942 spin_unlock_irqrestore(&phba->hbalock, iflags);
12943 /* Wake up worker thread to post the next pending mailbox command */
12944 lpfc_worker_wake_up(phba);
12947 out_no_mqe_complete:
12948 spin_lock_irqsave(&phba->hbalock, iflags);
12949 if (bf_get(lpfc_trailer_consumed, mcqe))
12950 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12951 spin_unlock_irqrestore(&phba->hbalock, iflags);
12956 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12957 * @phba: Pointer to HBA context object.
12958 * @cqe: Pointer to mailbox completion queue entry.
12960 * This routine processes a mailbox completion queue entry; it invokes the
12961 * proper mailbox-complete handling or asynchronous event handling routine
12962 * according to the MCQE's async bit.
12964 * Return: true if work posted to worker thread, otherwise false.
12967 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12969 struct lpfc_mcqe mcqe;
12972 /* Copy the mailbox MCQE and convert endian order as needed */
12973 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12975 /* Invoke the proper event handling routine */
12976 if (!bf_get(lpfc_trailer_async, &mcqe))
12977 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12979 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12984 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12985 * @phba: Pointer to HBA context object.
12986 * @cq: Pointer to associated CQ
12987 * @wcqe: Pointer to work-queue completion queue entry.
12989 * This routine handles an ELS work-queue completion event.
12991 * Return: true if work posted to worker thread, otherwise false.
12994 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12995 struct lpfc_wcqe_complete *wcqe)
12997 struct lpfc_iocbq *irspiocbq;
12998 unsigned long iflags;
12999 struct lpfc_sli_ring *pring = cq->pring;
13001 int txcmplq_cnt = 0;
13002 int fcp_txcmplq_cnt = 0;
13004 /* Check for response status */
13005 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13006 /* Log the error status */
13007 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13008 "0357 ELS CQE error: status=x%x: "
13009 "CQE: %08x %08x %08x %08x\n",
13010 bf_get(lpfc_wcqe_c_status, wcqe),
13011 wcqe->word0, wcqe->total_data_placed,
13012 wcqe->parameter, wcqe->word3);
13015 /* Get an irspiocbq for later ELS response processing use */
13016 irspiocbq = lpfc_sli_get_iocbq(phba);
13017 if (!irspiocbq) {
13018 if (!list_empty(&pring->txq))
13019 txq_cnt++;
13020 if (!list_empty(&pring->txcmplq))
13021 txcmplq_cnt++;
13022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13023 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13024 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13025 txq_cnt, phba->iocb_cnt,
13026 fcp_txcmplq_cnt,
13027 txcmplq_cnt);
13028 return false;
13029 }
13031 /* Save off the slow-path queue event for work thread to process */
13032 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13033 spin_lock_irqsave(&phba->hbalock, iflags);
13034 list_add_tail(&irspiocbq->cq_event.list,
13035 &phba->sli4_hba.sp_queue_event);
13036 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13037 spin_unlock_irqrestore(&phba->hbalock, iflags);
13043 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13044 * @phba: Pointer to HBA context object.
13045 * @wcqe: Pointer to work-queue completion queue entry.
13047 * This routine handles slow-path WQ entry consumed event by invoking the
13048 * proper WQ release routine to the slow-path WQ.
13051 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13052 struct lpfc_wcqe_release *wcqe)
13054 /* sanity check on queue memory */
13055 if (unlikely(!phba->sli4_hba.els_wq))
13056 return;
13057 /* Check for the slow-path ELS work queue */
13058 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13059 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13060 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13061 else
13062 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13063 "2579 Slow-path wqe consume event carries "
13064 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13065 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13066 phba->sli4_hba.els_wq->queue_id);
13070 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13071 * @phba: Pointer to HBA context object.
13072 * @cq: Pointer to a WQ completion queue.
13073 * @wcqe: Pointer to work-queue completion queue entry.
13075 * This routine handles an XRI abort event.
13077 * Return: true if work posted to worker thread, otherwise false.
13080 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13081 struct lpfc_queue *cq,
13082 struct sli4_wcqe_xri_aborted *wcqe)
13084 bool workposted = false;
13085 struct lpfc_cq_event *cq_event;
13086 unsigned long iflags;
13088 switch (cq->subtype) {
13089 case LPFC_FCP:
13090 cq_event = lpfc_cq_event_setup(
13091 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13092 if (!cq_event)
13093 return false;
13094 spin_lock_irqsave(&phba->hbalock, iflags);
13095 list_add_tail(&cq_event->list,
13096 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
13097 /* Set the fcp xri abort event flag */
13098 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
13099 spin_unlock_irqrestore(&phba->hbalock, iflags);
13100 workposted = true;
13101 break;
13102 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13103 case LPFC_ELS:
13104 cq_event = lpfc_cq_event_setup(
13105 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13108 spin_lock_irqsave(&phba->hbalock, iflags);
13109 list_add_tail(&cq_event->list,
13110 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13111 /* Set the els xri abort event flag */
13112 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13113 spin_unlock_irqrestore(&phba->hbalock, iflags);
13114 workposted = true;
13115 break;
13116 case LPFC_NVME:
13117 /* Notify aborted XRI for NVME work queue */
13118 if (phba->nvmet_support)
13119 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13120 else
13121 lpfc_sli4_nvme_xri_aborted(phba, wcqe);
13123 workposted = false;
13124 break;
13125 default:
13126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13127 "0603 Invalid CQ subtype %d: "
13128 "%08x %08x %08x %08x\n",
13129 cq->subtype, wcqe->word0, wcqe->parameter,
13130 wcqe->word2, wcqe->word3);
13131 workposted = false;
13138 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13139 * @phba: Pointer to HBA context object.
13140 * @rcqe: Pointer to receive-queue completion queue entry.
13142 * This routine processes a receive-queue completion queue entry.
13144 * Return: true if work posted to worker thread, otherwise false.
13147 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13149 bool workposted = false;
13150 struct fc_frame_header *fc_hdr;
13151 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13152 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13153 struct lpfc_nvmet_tgtport *tgtp;
13154 struct hbq_dmabuf *dma_buf;
13155 uint32_t status, rq_id;
13156 unsigned long iflags;
13158 /* sanity check on queue memory */
13159 if (unlikely(!hrq) || unlikely(!drq))
13162 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13163 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13164 else
13165 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13166 if (rq_id != hrq->queue_id)
13167 goto out;
13169 status = bf_get(lpfc_rcqe_status, rcqe);
13170 switch (status) {
13171 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13172 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13173 "2537 Receive Frame Truncated!!\n");
13174 case FC_STATUS_RQ_SUCCESS:
13175 spin_lock_irqsave(&phba->hbalock, iflags);
13176 lpfc_sli4_rq_release(hrq, drq);
13177 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13178 if (!dma_buf) {
13179 hrq->RQ_no_buf_found++;
13180 spin_unlock_irqrestore(&phba->hbalock, iflags);
13181 goto out;
13182 }
13184 hrq->RQ_buf_posted--;
13185 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13187 /* If a NVME LS event (type 0x28), treat it as Fast path */
13188 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13190 /* save off the frame for the worker thread to process */
13191 list_add_tail(&dma_buf->cq_event.list,
13192 &phba->sli4_hba.sp_queue_event);
13193 /* Frame received */
13194 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13195 spin_unlock_irqrestore(&phba->hbalock, iflags);
13198 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13199 if (phba->nvmet_support) {
13200 tgtp = phba->targetport->private;
13201 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13202 "6402 RQE Error x%x, posted %d err_cnt "
13204 status, hrq->RQ_buf_posted,
13205 hrq->RQ_no_posted_buf,
13206 atomic_read(&tgtp->rcv_fcp_cmd_in),
13207 atomic_read(&tgtp->rcv_fcp_cmd_out),
13208 atomic_read(&tgtp->xmt_fcp_release));
13209 }
13210 /* Drop thru */
13212 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13213 hrq->RQ_no_posted_buf++;
13214 /* Post more buffers if possible */
13215 spin_lock_irqsave(&phba->hbalock, iflags);
13216 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13217 spin_unlock_irqrestore(&phba->hbalock, iflags);
13218 workposted = true;
13219 break;
13220 }
13221 out:
13222 return workposted;
13223 }
13226 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13227 * @phba: Pointer to HBA context object.
13228 * @cq: Pointer to the completion queue.
13229 * @wcqe: Pointer to a completion queue entry.
13231 * This routine processes a slow-path work-queue or receive-queue completion
13232 * queue entry.
13234 * Return: true if work posted to worker thread, otherwise false.
13237 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13238 struct lpfc_cqe *cqe)
13240 struct lpfc_cqe cqevt;
13241 bool workposted = false;
13243 /* Copy the work queue CQE and convert endian order if needed */
13244 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13246 /* Check and process for different type of WCQE and dispatch */
13247 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13248 case CQE_CODE_COMPL_WQE:
13249 /* Process the WQ/RQ complete event */
13250 phba->last_completion_time = jiffies;
13251 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13252 (struct lpfc_wcqe_complete *)&cqevt);
13253 break;
13254 case CQE_CODE_RELEASE_WQE:
13255 /* Process the WQ release event */
13256 lpfc_sli4_sp_handle_rel_wcqe(phba,
13257 (struct lpfc_wcqe_release *)&cqevt);
13258 break;
13259 case CQE_CODE_XRI_ABORTED:
13260 /* Process the WQ XRI abort event */
13261 phba->last_completion_time = jiffies;
13262 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13263 (struct sli4_wcqe_xri_aborted *)&cqevt);
13264 break;
13265 case CQE_CODE_RECEIVE:
13266 case CQE_CODE_RECEIVE_V1:
13267 /* Process the RQ event */
13268 phba->last_completion_time = jiffies;
13269 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13270 (struct lpfc_rcqe *)&cqevt);
13271 break;
13272 default:
13273 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13274 "0388 Not a valid WCQE code: x%x\n",
13275 bf_get(lpfc_cqe_code, &cqevt));
13282 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13283 * @phba: Pointer to HBA context object.
13284 * @eqe: Pointer to fast-path event queue entry.
13286 * This routine processes an event queue entry from the slow-path event queue.
13287 * It will check the MajorCode and MinorCode to determine this is for a
13288 * completion event on a completion queue, if not, an error shall be logged
13289 * and just return. Otherwise, it will get to the corresponding completion
13290 * queue and process all the entries on that completion queue, rearm the
13291 * completion queue, and then return.
13295 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13296 struct lpfc_queue *speq)
13298 struct lpfc_queue *cq = NULL, *childq;
13301 /* Get the reference to the corresponding CQ */
13302 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13304 list_for_each_entry(childq, &speq->child_list, list) {
13305 if (childq->queue_id == cqid) {
13306 cq = childq;
13307 break;
13308 }
13309 }
13310 if (unlikely(!cq)) {
13311 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13313 "0365 Slow-path CQ identifier "
13314 "(%d) does not exist\n", cqid);
13318 /* Save EQ associated with this CQ */
13319 cq->assoc_qp = speq;
13321 if (!queue_work(phba->wq, &cq->spwork))
13322 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13323 "0390 Cannot schedule soft IRQ "
13324 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13325 cqid, cq->queue_id, smp_processor_id());
13326 }
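/*
 * Example (not part of the driver source): the pairing for the
 * queue_work() call above. The real INIT_WORK happens when the queue is
 * created; this sketch only shows the shape of binding the slow-path CQ
 * worker. The helper name is hypothetical.
 */
#if 0	/* illustration only */
static void example_bind_cq_worker(struct lpfc_hba *phba,
				   struct lpfc_queue *cq)
{
	/* spwork runs lpfc_sli4_sp_process_cq() on phba->wq */
	INIT_WORK(&cq->spwork, lpfc_sli4_sp_process_cq);

	/* later, from the EQ handler: a false return means the work was
	 * already pending, and the queued pass will see the new CQEs
	 */
	queue_work(phba->wq, &cq->spwork);
}
#endif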
13329 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13330 * @phba: Pointer to HBA context object.
13332 * This routine processes an event queue entry from the slow-path event queue.
13333 * It will check the MajorCode and MinorCode to determine this is for a
13334 * completion event on a completion queue, if not, an error shall be logged
13335 * and just return. Otherwise, it will get to the corresponding completion
13336 * queue and process all the entries on that completion queue, rearm the
13337 * completion queue, and then return.
13341 lpfc_sli4_sp_process_cq(struct work_struct *work)
13343 struct lpfc_queue *cq =
13344 container_of(work, struct lpfc_queue, spwork);
13345 struct lpfc_hba *phba = cq->phba;
13346 struct lpfc_cqe *cqe;
13347 bool workposted = false;
13350 /* Process all the entries to the CQ */
13351 switch (cq->type) {
13352 case LPFC_MCQ:
13353 while ((cqe = lpfc_sli4_cq_get(cq))) {
13354 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13355 if (!(++ccount % cq->entry_repost))
13356 break;
13357 }
13358 break;
13360 case LPFC_WCQ:
13361 while ((cqe = lpfc_sli4_cq_get(cq))) {
13362 if (cq->subtype == LPFC_FCP ||
13363 cq->subtype == LPFC_NVME) {
13364 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13365 if (phba->ktime_on)
13366 cq->isr_timestamp = ktime_get_ns();
13367 else
13368 cq->isr_timestamp = 0;
13369 #endif
13370 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13371 cqe);
13372 } else {
13373 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13374 cqe);
13375 }
13376 if (!(++ccount % cq->entry_repost))
13377 break;
13378 }
13379 break;
13380 /* Track the max number of CQEs processed in 1 EQ */
13381 if (ccount > cq->CQ_max_cqe)
13382 cq->CQ_max_cqe = ccount;
13383 break;
13384 default:
13385 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13386 "0370 Invalid completion queue type (%d)\n",
13387 cq->type);
13388 return;
13389 }
13391 /* Catch the no cq entry condition, log an error */
13392 if (unlikely(ccount == 0))
13393 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13394 "0371 No entry from the CQ: identifier "
13395 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13397 /* In any case, flush and re-arm the CQ */
13398 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13400 /* wake up the worker thread if there is work to be done */
13401 if (workposted)
13402 lpfc_worker_wake_up(phba);
13403 }
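/*
 * Example (not part of the driver source): the consume-then-rearm
 * contract used above, in miniature. A CQ must be released and re-armed
 * after a processing pass or the port stops signalling events for it.
 * The helper name is hypothetical and entry handling is elided.
 */
#if 0	/* illustration only */
static void example_poll_cq(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;

	while ((cqe = lpfc_sli4_cq_get(cq)))
		;	/* consume entries (handling elided) */

	/* report consumed entries to the port and re-enable interrupts */
	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
}
#endif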
13406 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13407 * @phba: Pointer to HBA context object.
13408 * @cq: Pointer to associated CQ
13409 * @wcqe: Pointer to work-queue completion queue entry.
13411 * This routine processes a fast-path work queue completion entry from a fast-path
13412 * event queue for FCP command response completion.
13415 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13416 struct lpfc_wcqe_complete *wcqe)
13418 struct lpfc_sli_ring *pring = cq->pring;
13419 struct lpfc_iocbq *cmdiocbq;
13420 struct lpfc_iocbq irspiocbq;
13421 unsigned long iflags;
13423 /* Check for response status */
13424 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13425 /* If resource errors reported from HBA, reduce queue
13426 * depth of the SCSI device.
13428 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13429 IOSTAT_LOCAL_REJECT)) &&
13430 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13431 IOERR_NO_RESOURCES))
13432 phba->lpfc_rampdown_queue_depth(phba);
13434 /* Log the error status */
13435 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13436 "0373 FCP CQE error: status=x%x: "
13437 "CQE: %08x %08x %08x %08x\n",
13438 bf_get(lpfc_wcqe_c_status, wcqe),
13439 wcqe->word0, wcqe->total_data_placed,
13440 wcqe->parameter, wcqe->word3);
13443 /* Look up the FCP command IOCB and create pseudo response IOCB */
13444 spin_lock_irqsave(&pring->ring_lock, iflags);
13445 pring->stats.iocb_event++;
13446 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13447 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13448 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13449 if (unlikely(!cmdiocbq)) {
13450 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13451 "0374 FCP complete with no corresponding "
13452 "cmdiocb: iotag (%d)\n",
13453 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13456 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13457 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13458 #endif
13459 if (cmdiocbq->iocb_cmpl == NULL) {
13460 if (cmdiocbq->wqe_cmpl) {
13461 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13462 spin_lock_irqsave(&phba->hbalock, iflags);
13463 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13464 spin_unlock_irqrestore(&phba->hbalock, iflags);
13465 }
13467 /* Pass the cmd_iocb and the wcqe to the upper layer */
13468 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13469 return;
13470 }
13471 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13472 "0375 FCP cmdiocb not callback function "
13473 "iotag: (%d)\n",
13474 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13475 return;
13476 }
13478 /* Fake the irspiocb and copy necessary response information */
13479 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13481 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13482 spin_lock_irqsave(&phba->hbalock, iflags);
13483 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13484 spin_unlock_irqrestore(&phba->hbalock, iflags);
13487 /* Pass the cmd_iocb and the rsp state to the upper layer */
13488 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13492 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13493 * @phba: Pointer to HBA context object.
13494 * @cq: Pointer to completion queue.
13495 * @wcqe: Pointer to work-queue completion queue entry.
13497 * This routine handles a fast-path WQ entry consumed event by invoking the
13498 * proper WQ release routine to the slow-path WQ.
13501 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13502 struct lpfc_wcqe_release *wcqe)
13504 struct lpfc_queue *childwq;
13505 bool wqid_matched = false;
13508 /* Check for fast-path FCP work queue release */
13509 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13510 list_for_each_entry(childwq, &cq->child_list, list) {
13511 if (childwq->queue_id == hba_wqid) {
13512 lpfc_sli4_wq_release(childwq,
13513 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13514 if (childwq->q_flag & HBA_NVMET_WQFULL)
13515 lpfc_nvmet_wqfull_process(phba, childwq);
13516 wqid_matched = true;
13520 /* Report warning log message if no match found */
13521 if (wqid_matched != true)
13522 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13523 "2580 Fast-path wqe consume event carries "
13524 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue the entry arrived on.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* Drop thru */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf,
				cq->isr_timestamp);
			return false;
		}
drop:
		lpfc_in_buf_free(phba, &dma_buf->dbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* Drop thru */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from the
 * fast-path event queue for FCP command response completion.
 **/
static int
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		if (cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

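/*
 * Editorial sketch (illustrative, not driver code): the dispatcher above
 * always works on a local, endian-corrected copy of the CQE, never on the
 * DMA ring entry itself, so the hardware can recycle the ring slot while the
 * copy is decoded. Any new CQE consumer should follow the same shape:
 *
 *	struct lpfc_wcqe_release wcqe;
 *
 *	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
 *	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
 *	...
 *	}
 */
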
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @qidx: Index of the fast-path event queue the entry was read from.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It checks the MajorCode and MinorCode to determine whether this is
 * a completion event on a completion queue; if not, an error is logged and
 * the routine returns. Otherwise, it locates the corresponding completion
 * queue and schedules the processing of its entries, after which the
 * completion queue is rearmed.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t qidx)
{
	struct lpfc_queue *cq = NULL;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvme_cq_map &&
	    (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
		/* Process NVME / NVMET command completion */
		cq = phba->sli4_hba.nvme_cq[qidx];
		goto process_cq;
	}

	if (phba->sli4_hba.fcp_cq_map &&
	    (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
		/* Process FCP command completion */
		cq = phba->sli4_hba.fcp_cq[qidx];
		goto process_cq;
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];

	if (!queue_work(phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, smp_processor_id());
}

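/*
 * Editorial note (illustrative): CQ processing is deferred out of hard-IRQ
 * context with a per-queue work item. The binding is done once at queue
 * allocation time (see lpfc_sli4_queue_alloc() below), so the handler above
 * only has to call queue_work():
 *
 *	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
 *	...
 *	queue_work(phba->wq, &cq->irqwork);
 */
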
/**
 * lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @work: Pointer to the work_struct embedded in the completion queue.
 *
 * This routine runs in workqueue (process) context and drains a fast-path
 * completion queue: it processes all pending entries on the completion
 * queue, tracks completion statistics, rearms the completion queue, and
 * wakes the worker thread if any handler posted work for it.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq =
		container_of(work, struct lpfc_queue, irqwork);
	struct lpfc_hba *phba = cq->phba;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ccount = 0;

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on)
			cq->isr_timestamp = ktime_get_ns();
		else
			cq->isr_timestamp = 0;
#endif
		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
		if (!(++ccount % cq->entry_repost))
			break;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ccount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ccount;
	cq->assoc_qp->EQ_cqe_cnt += ccount;

	/* Catch the no cq entry condition */
	if (unlikely(ccount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

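/*
 * Editorial note (illustrative): "++ccount % cq->entry_repost" above acts as
 * a processing budget, the same bounded-drain shape used by NAPI-style
 * pollers. A minimal sketch of the pattern, with handle() standing in for
 * lpfc_sli4_fp_handle_cqe():
 *
 *	int count = 0;
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		handle(cqe);
 *		if (!(++count % cq->entry_repost))
 *			break;
 *	}
 */
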
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* walk all the EQ entries and drop on the floor */
	while ((eqe = lpfc_sli4_eq_get(eq)))
		;

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
 *			      entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the Flash Optimized
 * Fabric event queue. It checks the MajorCode and MinorCode to determine
 * whether this is a completion event on a completion queue; if not, an
 * error is logged and the routine returns. Otherwise, it locates the
 * corresponding completion queue and schedules the processing of its
 * entries, after which the completion queue is rearmed.
 **/
static void
lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq;
	uint16_t cqid;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9147 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Next check for OAS */
	cq = phba->sli4_hba.oas_cq;
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9148 OAS completion queue "
					"does not exist\n");
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9149 Miss-matched fast-path compl "
				"queue id: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = phba->sli4_hba.fof_eq;

	/* CQ work will be processed on CPU affinitized to this IRQ */
	if (!queue_work(phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, smp_processor_id());
}

/**
 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device with the SLI-4 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there is a Flash Optimized
 * Fabric IOCB ring event in the HBA. However, when the device is enabled
 * with either MSI or Pin-IRQ interrupt mode, this function is called as
 * part of the device-level interrupt handler. When the PCI slot is in error
 * recovery or the HBA is undergoing initialization, the interrupt handler
 * will not process the interrupt. Flash Optimized Fabric ring events are
 * handled in the interrupt context. This function is called without any
 * lock held. It gets the hbalock to access and update SLI data structures.
 * Note that the EQ to CQ mapping is one-to-one, such that the EQ index is
 * equal to that of the CQ index.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *eq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	eq = phba->sli4_hba.fof_eq;
	if (unlikely(!eq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, eq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on the FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(eq))) {
		lpfc_sli4_fof_handle_eqe(phba, eqe);
		if (!(++ecount % eq->entry_repost))
			break;
		eq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > eq->EQ_max_eqe)
		eq->EQ_max_eqe = ecount;

	if (unlikely(ecount == 0)) {
		eq->EQ_no_entry++;

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"9145 MSI-X interrupt with no EQE\n");
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9146 ISR interrupt with no EQE\n");
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
		}
	}
	/* Always clear and re-arm the fast-path EQ */
	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
	return IRQ_HANDLED;
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device with the SLI-4 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. SCSI FCP fast-path ring events are handled in the
 * interrupt context. This function is called without any lock held. It gets
 * the hbalock to access and update SLI data structures. Note that the FCP
 * EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is equal
 * to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hba_eq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
	if (unlikely(!fpeq))
		return IRQ_NONE;

	if (lpfc_fcp_look_ahead) {
		if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
			phba->sli4_hba.sli4_eq_clr_intr(fpeq);
		else {
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
			return IRQ_NONE;
		}
	}

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (lpfc_fcp_look_ahead)
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on the FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
		if (!(++ecount % fpeq->entry_repost))
			break;
		fpeq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > fpeq->EQ_max_eqe)
		fpeq->EQ_max_eqe = ecount;

	/* Always clear and re-arm the fast-path EQ */
	phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;

		if (lpfc_fcp_look_ahead) {
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
			return IRQ_NONE;
		}

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	if (lpfc_fcp_look_ahead)
		atomic_inc(&hba_eq_hdl->hba_eq_in_use);

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */

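/*
 * Editorial note (illustrative): this per-vector handler is registered
 * elsewhere in the driver with one lpfc_hba_eq_hdl as the dev_id cookie per
 * MSI-X vector, roughly of the form:
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, idx),
 *			 &lpfc_sli4_hba_intr_handler, 0,
 *			 name, &phba->sli4_hba.hba_eq_hdl[idx]);
 *
 * which is why the handler recovers both phba and the EQ index from dev_id.
 */
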
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	if (phba->cfg_fof) {
		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	kfree(queue);
	return;
}

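/*
 * Editorial note (illustrative): every lpfc_sli4_queue_alloc() must
 * eventually be paired with lpfc_sli4_queue_free(), but only after the
 * matching destroy mailbox command has removed the queue from the port:
 *
 *	lpfc_eq_destroy(phba, eq);
 *	lpfc_sli4_queue_free(eq);
 */
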
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
	    (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   hw_page_size, &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);

	/* entry_repost will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

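/*
 * Editorial usage sketch (illustrative, error handling elided): allocate the
 * host memory first, then create the queue on the port, as the queue setup
 * code in lpfc_init.c does:
 *
 *	struct lpfc_queue *qdesc;
 *
 *	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				      phba->sli4_hba.eq_esize,
 *				      phba->sli4_hba.eq_ecount);
 *	if (!qdesc)
 *		goto out_error;
 */
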
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 **/
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}

	return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
 * @phba: HBA structure that indicates port to create a queue on.
 * @startq: The starting FCP EQ to modify
 * @numq: The maximum number of EQs to update with this command
 * @imax: The maximum interrupts per second to target
 *
 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs to be
 * updated in one mailbox command.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The
 * @startq is used to get the starting FCP EQ to change. This function
 * issues the mailbox command synchronously, waiting for it to finish
 * before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t imax)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt, rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t result, val;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;

	if (startq >= phba->io_channel_irqs)
		return 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	result = imax / phba->io_channel_irqs;
	if (result > LPFC_DMULT_CONST || result == 0)
		dmult = 0;
	else
		dmult = LPFC_DMULT_CONST/result - 1;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	cnt = 0;
	for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
		eq = phba->sli4_hba.hba_eq[qidx];
		if (!eq)
			continue;

		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;
		cnt++;

		/* q_mode is only used for auto_imax */
		if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
			/* Use EQ Delay Register method for q_mode */

			/* Convert for EQ Delay register */
			val = phba->cfg_fcp_imax;
			if (val) {
				/* First, interrupts per sec per EQ */
				val = phba->cfg_fcp_imax /
					phba->io_channel_irqs;

				/* us delay between each interrupt */
				val = LPFC_SEC_TO_USEC / val;
			}
			eq->q_mode = val;
		} else {
			eq->q_mode = imax;
		}

		if (cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

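/*
 * Editorial worked example (hypothetical numbers): with imax = 150000
 * interrupts/sec spread across io_channel_irqs = 4 EQs, result = 37500 and
 * dmult = LPFC_DMULT_CONST / 37500 - 1, clamped to LPFC_DMULT_MAX; a result
 * of 0, or one larger than LPFC_DMULT_CONST, disables coalescing by leaving
 * dmult = 0.
 */
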
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @eq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function issues the EQ_CREATE mailbox command synchronously, waiting for
 * it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EINVAL;
		}
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;
	eq->entry_repost = LPFC_EQ_REPOST;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

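/*
 * Editorial note (illustrative): EQ_CREATE only encodes the discrete sizes
 * handled by the switch above. A hypothetical helper that clamps a requested
 * size to a supported one would look like:
 *
 *	static uint32_t lpfc_eq_clamp_count(uint32_t want)
 *	{
 *		uint32_t cnt = 256;
 *
 *		while (cnt < want && cnt < 4096)
 *			cnt <<= 1;
 *		return cnt;
 *	}
 */
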
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: Type of queue to create.
 * @subtype: Functional purpose of the queue to create.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * This function issues the CQ_CREATE mailbox command synchronously, waiting
 * for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = cq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* Fall Thru */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;
	cq->entry_repost = LPFC_CQ_REPOST;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

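/*
 * Editorial ordering sketch (illustrative, error handling elided): a CQ can
 * only be created against an EQ that already exists on the port, and the
 * child-list linkage above is what later ties teardown together:
 *
 *	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
 *		goto fail;
 *	if (lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP))
 *		goto fail;
 */
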
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @eqp: The event queue array to bind these completion queues to.
 * @type: Type of queues to create.
 * @subtype: Functional purpose of the queues to create.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @cqp
 * array is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for each queue. The @eqp
 * array indicates which event queue to bind each completion queue to. This
 * function issues the CREATE_CQ_SET mailbox command synchronously, waiting
 * for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !eqp || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = eqp[idx];
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* Fall Thru */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* otherwise default to smallest (drop thru) */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->host_index = 0;
		cq->hba_index = 0;
		cq->entry_repost = LPFC_CQ_REPOST;
		cq->chann = idx;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is otherwise
 * identical to mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function issues the MQ_CREATE mailbox command synchronously, waiting for
 * it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;
	mq->entry_repost = LPFC_MQ_REPOST;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

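/*
 * Editorial note (illustrative): the MQ_CREATE_EXT-then-MQ_CREATE sequence
 * above is a try-new-then-fall-back pattern for older firmware. The same
 * mailbox buffer is simply reinitialized and reissued, so no extra
 * allocation is needed on the fallback path:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	if (rc != MBX_SUCCESS) {
 *		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	}
 */
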
15147 * lpfc_wq_create - Create a Work Queue on the HBA
15148 * @phba: HBA structure that indicates port to create a queue on.
15149 * @wq: The queue structure to use to create the work queue.
15150 * @cq: The completion queue to bind this work queue to.
15151 * @subtype: The subtype of the work queue indicating its functionality.
15153 * This function creates a work queue, as detailed in @wq, on a port, described
15154 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15156 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15157 * is used to get the entry count and entry size that are necessary to
15158 * determine the number of pages to allocate and use for this queue. The @cq
15159 * is used to indicate which completion queue to bind this work queue to. This
15160 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15161 * work queue. This function is asynchronous and will wait for the mailbox
15162 * command to finish before continuing.
15164 * On success this function will return a zero. If unable to allocate enough
15165 * memory this function will return -ENOMEM. If the queue create mailbox command
15166 * fails this function will return -ENXIO.
15169 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15170 struct lpfc_queue *cq, uint32_t subtype)
15172 struct lpfc_mbx_wq_create *wq_create;
15173 struct lpfc_dmabuf *dmabuf;
15174 LPFC_MBOXQ_t *mbox;
15175 int rc, length, status = 0;
15176 uint32_t shdr_status, shdr_add_status;
15177 union lpfc_sli4_cfg_shdr *shdr;
15178 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15179 struct dma_address *page;
15180 void __iomem *bar_memmap_p;
15181 uint32_t db_offset;
15182 uint16_t pci_barset;
15183 uint8_t dpp_barset;
15184 uint32_t dpp_offset;
15185 unsigned long pg_addr;
15186 uint8_t wq_create_version;
15188 /* sanity check on queue memory */
15191 if (!phba->sli4_hba.pc_sli4_params.supported)
15192 hw_page_size = wq->page_size;
15194 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15197 length = (sizeof(struct lpfc_mbx_wq_create) -
15198 sizeof(struct lpfc_sli4_cfg_mhdr));
15199 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15200 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15201 length, LPFC_SLI4_MBX_EMBED);
15202 wq_create = &mbox->u.mqe.un.wq_create;
15203 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15204 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15206 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15209 /* wqv is the earliest version supported, NOT the latest */
15210 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15211 phba->sli4_hba.pc_sli4_params.wqv);
15213 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15214 (wq->page_size > SLI4_PAGE_SIZE))
15215 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15217 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15220 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15221 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15223 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15225 switch (wq_create_version) {
15226 case LPFC_Q_CREATE_VERSION_1:
15227 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15229 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15230 LPFC_Q_CREATE_VERSION_1);
15232 switch (wq->entry_size) {
15235 bf_set(lpfc_mbx_wq_create_wqe_size,
15236 &wq_create->u.request_1,
15237 LPFC_WQ_WQE_SIZE_64);
15240 bf_set(lpfc_mbx_wq_create_wqe_size,
15241 &wq_create->u.request_1,
15242 LPFC_WQ_WQE_SIZE_128);
15245 /* Request DPP by default */
15246 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15247 bf_set(lpfc_mbx_wq_create_page_size,
15248 &wq_create->u.request_1,
15249 (wq->page_size / SLI4_PAGE_SIZE));
15250 page = wq_create->u.request_1.page;
15253 page = wq_create->u.request.page;
15257 list_for_each_entry(dmabuf, &wq->page_list, list) {
15258 memset(dmabuf->virt, 0, hw_page_size);
15259 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15260 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15263 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15264 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15266 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15267 /* The IOCTL status is embedded in the mailbox subheader. */
15268 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15269 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15270 if (shdr_status || shdr_add_status || rc) {
15271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15272 "2503 WQ_CREATE mailbox failed with "
15273 "status x%x add_status x%x, mbx status x%x\n",
15274 shdr_status, shdr_add_status, rc);
15279 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15280 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15281 &wq_create->u.response);
15283 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15284 &wq_create->u.response_1);
15286 if (wq->queue_id == 0xFFFF) {
15291 wq->db_format = LPFC_DB_LIST_FORMAT;
15292 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15293 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15294 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15295 &wq_create->u.response);
15296 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15297 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15299 "3265 WQ[%d] doorbell format "
15300 "not supported: x%x\n",
15301 wq->queue_id, wq->db_format);
15305 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15306 &wq_create->u.response);
15307 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15309 if (!bar_memmap_p) {
15310 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15311 "3263 WQ[%d] failed to memmap "
15312 "pci barset:x%x\n",
15313 wq->queue_id, pci_barset);
15317 db_offset = wq_create->u.response.doorbell_offset;
15318 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15319 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15321 "3252 WQ[%d] doorbell offset "
15322 "not supported: x%x\n",
15323 wq->queue_id, db_offset);
15327 wq->db_regaddr = bar_memmap_p + db_offset;
15328 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15329 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15330 "format:x%x\n", wq->queue_id,
15331 pci_barset, db_offset, wq->db_format);
15333 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15335 /* Check if DPP was honored by the firmware */
15336 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15337 &wq_create->u.response_1);
15338 if (wq->dpp_enable) {
15339 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15340 &wq_create->u.response_1);
15341 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15343 if (!bar_memmap_p) {
15344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15345 "3267 WQ[%d] failed to memmap "
15346 "pci barset:x%x\n",
15347 wq->queue_id, pci_barset);
15351 db_offset = wq_create->u.response_1.doorbell_offset;
15352 wq->db_regaddr = bar_memmap_p + db_offset;
15353 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15354 &wq_create->u.response_1);
15355 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15356 &wq_create->u.response_1);
15357 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15359 if (!bar_memmap_p) {
15360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15361 "3268 WQ[%d] failed to memmap "
15362 "pci barset:x%x\n",
15363 wq->queue_id, dpp_barset);
15367 dpp_offset = wq_create->u.response_1.dpp_offset;
15368 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15369 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15370 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15371 "dpp_id:x%x dpp_barset:x%x "
15372 "dpp_offset:x%x\n",
15373 wq->queue_id, pci_barset, db_offset,
15374 wq->dpp_id, dpp_barset, dpp_offset);
15376 /* Enable combined writes for DPP aperture */
15377 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15379 rc = set_memory_wc(pg_addr, 1);
15381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15382 "3272 Cannot setup Combined "
15383 "Write on WQ[%d] - disable DPP\n",
15385 phba->cfg_enable_dpp = 0;
15388 phba->cfg_enable_dpp = 0;
15391 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15393 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15394 if (wq->pring == NULL) {
15398 wq->type = LPFC_WQ;
15399 wq->assoc_qid = cq->queue_id;
15400 wq->subtype = subtype;
15401 wq->host_index = 0;
15403 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15405 /* link the wq onto the parent cq child list */
15406 list_add_tail(&wq->list, &cq->child_list);
15408 mempool_free(mbox, phba->mbox_mem_pool);
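/*
 * Illustrative sketch, not driver code: the WQ_CREATE version selection
 * above reduces to a small predicate. A minimal standalone model, where
 * the hypothetical sketch_* names stand in for the driver's macros and
 * bitfield accessors.
 */
#include <stdint.h>

#define SKETCH_WQ_SZ128_SUPPORT	0x2	/* stand-in for LPFC_WQ_SZ128_SUPPORT */
#define SKETCH_SLI4_PAGE_SIZE	4096	/* stand-in for SLI4_PAGE_SIZE */

/* Returns 1 when Q_CREATE version 1 is required: either 128-byte WQEs
 * are supported or the WQ uses pages larger than the base 4KB page. */
static int sketch_wq_create_version(uint32_t wqsize_flags, uint32_t page_size)
{
	return (wqsize_flags & SKETCH_WQ_SZ128_SUPPORT) ||
	       (page_size > SKETCH_SLI4_PAGE_SIZE);
}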
15413 * lpfc_rq_create - Create a Receive Queue on the HBA
15414 * @phba: HBA structure that indicates port to create a queue on.
15415 * @hrq: The queue structure to use to create the header receive queue.
15416 * @drq: The queue structure to use to create the data receive queue.
15417 * @cq: The completion queue to bind these receive queues to.
15419 * This function creates a receive buffer queue pair, as detailed in @hrq and
15420 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15423 * The @phba struct is used to send the mailbox command to the HBA. The @drq and
15424 * @hrq structs are used to get the entry count that is necessary to determine
15425 * the number of pages to use for this queue. The @cq indicates which completion
15426 * queue the buffers received on these queues will be bound to.
15427 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
15428 * receive queue pair. This function is synchronous and will wait for the
15429 * mailbox command to finish before continuing.
15431 * On success this function will return a zero. If unable to allocate enough
15432 * memory this function will return -ENOMEM. If the queue create mailbox command
15433 * fails this function will return -ENXIO.
15436 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15437 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15439 struct lpfc_mbx_rq_create *rq_create;
15440 struct lpfc_dmabuf *dmabuf;
15441 LPFC_MBOXQ_t *mbox;
15442 int rc, length, status = 0;
15443 uint32_t shdr_status, shdr_add_status;
15444 union lpfc_sli4_cfg_shdr *shdr;
15445 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15446 void __iomem *bar_memmap_p;
15447 uint32_t db_offset;
15448 uint16_t pci_barset;
15450 /* sanity check on queue memory */
15451 if (!hrq || !drq || !cq)
15453 if (!phba->sli4_hba.pc_sli4_params.supported)
15454 hw_page_size = SLI4_PAGE_SIZE;
15456 if (hrq->entry_count != drq->entry_count)
15458 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15461 length = (sizeof(struct lpfc_mbx_rq_create) -
15462 sizeof(struct lpfc_sli4_cfg_mhdr));
15463 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15464 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15465 length, LPFC_SLI4_MBX_EMBED);
15466 rq_create = &mbox->u.mqe.un.rq_create;
15467 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15468 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15469 phba->sli4_hba.pc_sli4_params.rqv);
15470 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15471 bf_set(lpfc_rq_context_rqe_count_1,
15472 &rq_create->u.request.context,
15474 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15475 bf_set(lpfc_rq_context_rqe_size,
15476 &rq_create->u.request.context,
15478 bf_set(lpfc_rq_context_page_size,
15479 &rq_create->u.request.context,
15480 LPFC_RQ_PAGE_SIZE_4096);
15482 switch (hrq->entry_count) {
15484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15485 "2535 Unsupported RQ count. (%d)\n",
15487 if (hrq->entry_count < 512) {
15491 /* otherwise default to smallest count (drop through) */
15493 bf_set(lpfc_rq_context_rqe_count,
15494 &rq_create->u.request.context,
15495 LPFC_RQ_RING_SIZE_512);
15498 bf_set(lpfc_rq_context_rqe_count,
15499 &rq_create->u.request.context,
15500 LPFC_RQ_RING_SIZE_1024);
15503 bf_set(lpfc_rq_context_rqe_count,
15504 &rq_create->u.request.context,
15505 LPFC_RQ_RING_SIZE_2048);
15508 bf_set(lpfc_rq_context_rqe_count,
15509 &rq_create->u.request.context,
15510 LPFC_RQ_RING_SIZE_4096);
15513 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15514 LPFC_HDR_BUF_SIZE);
15516 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15518 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15520 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15521 memset(dmabuf->virt, 0, hw_page_size);
15522 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15523 putPaddrLow(dmabuf->phys);
15524 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15525 putPaddrHigh(dmabuf->phys);
15527 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15528 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15530 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15531 /* The IOCTL status is embedded in the mailbox subheader. */
15532 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15533 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15534 if (shdr_status || shdr_add_status || rc) {
15535 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15536 "2504 RQ_CREATE mailbox failed with "
15537 "status x%x add_status x%x, mbx status x%x\n",
15538 shdr_status, shdr_add_status, rc);
15542 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15543 if (hrq->queue_id == 0xFFFF) {
15548 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15549 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15550 &rq_create->u.response);
15551 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15552 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15554 "3262 RQ [%d] doorbell format not "
15555 "supported: x%x\n", hrq->queue_id,
15561 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15562 &rq_create->u.response);
15563 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15564 if (!bar_memmap_p) {
15565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15566 "3269 RQ[%d] failed to memmap pci "
15567 "barset:x%x\n", hrq->queue_id,
15573 db_offset = rq_create->u.response.doorbell_offset;
15574 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15575 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15576 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15577 "3270 RQ[%d] doorbell offset not "
15578 "supported: x%x\n", hrq->queue_id,
15583 hrq->db_regaddr = bar_memmap_p + db_offset;
15584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15585 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15586 "format:x%x\n", hrq->queue_id, pci_barset,
15587 db_offset, hrq->db_format);
15589 hrq->db_format = LPFC_DB_RING_FORMAT;
15590 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15592 hrq->type = LPFC_HRQ;
15593 hrq->assoc_qid = cq->queue_id;
15594 hrq->subtype = subtype;
15595 hrq->host_index = 0;
15596 hrq->hba_index = 0;
15597 hrq->entry_repost = LPFC_RQ_REPOST;
15599 /* now create the data queue */
15600 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15601 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15602 length, LPFC_SLI4_MBX_EMBED);
15603 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15604 phba->sli4_hba.pc_sli4_params.rqv);
15605 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15606 bf_set(lpfc_rq_context_rqe_count_1,
15607 &rq_create->u.request.context, hrq->entry_count);
15608 if (subtype == LPFC_NVMET)
15609 rq_create->u.request.context.buffer_size =
15610 LPFC_NVMET_DATA_BUF_SIZE;
15612 rq_create->u.request.context.buffer_size =
15613 LPFC_DATA_BUF_SIZE;
15614 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15616 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15617 (PAGE_SIZE/SLI4_PAGE_SIZE));
15619 switch (drq->entry_count) {
15621 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15622 "2536 Unsupported RQ count. (%d)\n",
15624 if (drq->entry_count < 512) {
15628 /* otherwise default to smallest count (drop through) */
15630 bf_set(lpfc_rq_context_rqe_count,
15631 &rq_create->u.request.context,
15632 LPFC_RQ_RING_SIZE_512);
15635 bf_set(lpfc_rq_context_rqe_count,
15636 &rq_create->u.request.context,
15637 LPFC_RQ_RING_SIZE_1024);
15640 bf_set(lpfc_rq_context_rqe_count,
15641 &rq_create->u.request.context,
15642 LPFC_RQ_RING_SIZE_2048);
15645 bf_set(lpfc_rq_context_rqe_count,
15646 &rq_create->u.request.context,
15647 LPFC_RQ_RING_SIZE_4096);
15650 if (subtype == LPFC_NVMET)
15651 bf_set(lpfc_rq_context_buf_size,
15652 &rq_create->u.request.context,
15653 LPFC_NVMET_DATA_BUF_SIZE);
15655 bf_set(lpfc_rq_context_buf_size,
15656 &rq_create->u.request.context,
15657 LPFC_DATA_BUF_SIZE);
15659 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15661 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15663 list_for_each_entry(dmabuf, &drq->page_list, list) {
15664 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15665 putPaddrLow(dmabuf->phys);
15666 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15667 putPaddrHigh(dmabuf->phys);
15669 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15670 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15671 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15672 /* The IOCTL status is embedded in the mailbox subheader. */
15673 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15674 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15675 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15676 if (shdr_status || shdr_add_status || rc) {
15680 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15681 if (drq->queue_id == 0xFFFF) {
15685 drq->type = LPFC_DRQ;
15686 drq->assoc_qid = cq->queue_id;
15687 drq->subtype = subtype;
15688 drq->host_index = 0;
15689 drq->hba_index = 0;
15690 drq->entry_repost = LPFC_RQ_REPOST;
15692 /* link the header and data RQs onto the parent cq child list */
15693 list_add_tail(&hrq->list, &cq->child_list);
15694 list_add_tail(&drq->list, &cq->child_list);
15697 mempool_free(mbox, phba->mbox_mem_pool);
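/*
 * Illustrative sketch, not driver code: the entry-count switch above in
 * miniature. Counts below 512 are rejected; recognized counts map to a
 * ring-size encoding; other counts fall back to the smallest ring. The
 * sketch_* encodings are hypothetical stand-ins for LPFC_RQ_RING_SIZE_*.
 */
enum sketch_rq_ring {
	SKETCH_RING_INVALID = -1,
	SKETCH_RING_512 = 9,
	SKETCH_RING_1024 = 10,
	SKETCH_RING_2048 = 11,
	SKETCH_RING_4096 = 12,
};

static enum sketch_rq_ring sketch_rqe_count_encoding(unsigned int entries)
{
	switch (entries) {
	case 512:  return SKETCH_RING_512;
	case 1024: return SKETCH_RING_1024;
	case 2048: return SKETCH_RING_2048;
	case 4096: return SKETCH_RING_4096;
	default:
		/* unsupported count: reject tiny rings, else use smallest */
		return entries < 512 ? SKETCH_RING_INVALID : SKETCH_RING_512;
	}
}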
15702 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15703 * @phba: HBA structure that indicates port to create a queue on.
15704 * @hrqp: The queue structure array to use to create the header receive queues.
15705 * @drqp: The queue structure array to use to create the data receive queues.
15706 * @cqp: The completion queue array to bind these receive queues to.
15708 * This function creates receive buffer queue pairs, as detailed in @hrqp and
15709 * @drqp, on a port, described by @phba by sending a RQ_CREATE mailbox command
15712 * The @phba struct is used to send the mailbox command to the HBA. The @drqp and
15713 * @hrqp arrays are used to get the entry counts that are necessary to determine
15714 * the number of pages to use for each queue. The @cqp array indicates which
15715 * completion queue the buffers received on each queue pair will be bound to.
15716 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
15717 * receive queue pairs. This function is synchronous and will wait for the
15718 * mailbox command to finish before continuing.
15720 * On success this function will return a zero. If unable to allocate enough
15721 * memory this function will return -ENOMEM. If the queue create mailbox command
15722 * fails this function will return -ENXIO.
15725 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15726 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15729 struct lpfc_queue *hrq, *drq, *cq;
15730 struct lpfc_mbx_rq_create_v2 *rq_create;
15731 struct lpfc_dmabuf *dmabuf;
15732 LPFC_MBOXQ_t *mbox;
15733 int rc, length, alloclen, status = 0;
15734 int cnt, idx, numrq, page_idx = 0;
15735 uint32_t shdr_status, shdr_add_status;
15736 union lpfc_sli4_cfg_shdr *shdr;
15737 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15739 numrq = phba->cfg_nvmet_mrq;
15740 /* sanity check on array memory */
15741 if (!hrqp || !drqp || !cqp || !numrq)
15743 if (!phba->sli4_hba.pc_sli4_params.supported)
15744 hw_page_size = SLI4_PAGE_SIZE;
15746 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15750 length = sizeof(struct lpfc_mbx_rq_create_v2);
15751 length += ((2 * numrq * hrqp[0]->page_count) *
15752 sizeof(struct dma_address));
15754 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15755 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15756 LPFC_SLI4_MBX_NEMBED);
15757 if (alloclen < length) {
15758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15759 "3099 Allocated DMA memory size (%d) is "
15760 "less than the requested DMA memory size "
15761 "(%d)\n", alloclen, length);
15768 rq_create = mbox->sge_array->addr[0];
15769 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15771 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15774 for (idx = 0; idx < numrq; idx++) {
15779 /* sanity check on queue memory */
15780 if (!hrq || !drq || !cq) {
15785 if (hrq->entry_count != drq->entry_count) {
15791 bf_set(lpfc_mbx_rq_create_num_pages,
15792 &rq_create->u.request,
15794 bf_set(lpfc_mbx_rq_create_rq_cnt,
15795 &rq_create->u.request, (numrq * 2));
15796 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15798 bf_set(lpfc_rq_context_base_cq,
15799 &rq_create->u.request.context,
15801 bf_set(lpfc_rq_context_data_size,
15802 &rq_create->u.request.context,
15803 LPFC_NVMET_DATA_BUF_SIZE);
15804 bf_set(lpfc_rq_context_hdr_size,
15805 &rq_create->u.request.context,
15806 LPFC_HDR_BUF_SIZE);
15807 bf_set(lpfc_rq_context_rqe_count_1,
15808 &rq_create->u.request.context,
15810 bf_set(lpfc_rq_context_rqe_size,
15811 &rq_create->u.request.context,
15813 bf_set(lpfc_rq_context_page_size,
15814 &rq_create->u.request.context,
15815 (PAGE_SIZE/SLI4_PAGE_SIZE));
15818 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15819 memset(dmabuf->virt, 0, hw_page_size);
15820 cnt = page_idx + dmabuf->buffer_tag;
15821 rq_create->u.request.page[cnt].addr_lo =
15822 putPaddrLow(dmabuf->phys);
15823 rq_create->u.request.page[cnt].addr_hi =
15824 putPaddrHigh(dmabuf->phys);
15830 list_for_each_entry(dmabuf, &drq->page_list, list) {
15831 memset(dmabuf->virt, 0, hw_page_size);
15832 cnt = page_idx + dmabuf->buffer_tag;
15833 rq_create->u.request.page[cnt].addr_lo =
15834 putPaddrLow(dmabuf->phys);
15835 rq_create->u.request.page[cnt].addr_hi =
15836 putPaddrHigh(dmabuf->phys);
15841 hrq->db_format = LPFC_DB_RING_FORMAT;
15842 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15843 hrq->type = LPFC_HRQ;
15844 hrq->assoc_qid = cq->queue_id;
15845 hrq->subtype = subtype;
15846 hrq->host_index = 0;
15847 hrq->hba_index = 0;
15848 hrq->entry_repost = LPFC_RQ_REPOST;
15850 drq->db_format = LPFC_DB_RING_FORMAT;
15851 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15852 drq->type = LPFC_DRQ;
15853 drq->assoc_qid = cq->queue_id;
15854 drq->subtype = subtype;
15855 drq->host_index = 0;
15856 drq->hba_index = 0;
15857 drq->entry_repost = LPFC_RQ_REPOST;
15859 list_add_tail(&hrq->list, &cq->child_list);
15860 list_add_tail(&drq->list, &cq->child_list);
15863 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15864 /* The IOCTL status is embedded in the mailbox subheader. */
15865 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15866 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15867 if (shdr_status || shdr_add_status || rc) {
15868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15869 "3120 RQ_CREATE mailbox failed with "
15870 "status x%x add_status x%x, mbx status x%x\n",
15871 shdr_status, shdr_add_status, rc);
15875 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15876 if (rc == 0xFFFF) {
15881 /* Initialize all RQs with associated queue id */
15882 for (idx = 0; idx < numrq; idx++) {
15884 hrq->queue_id = rc + (2 * idx);
15886 drq->queue_id = rc + (2 * idx) + 1;
15890 lpfc_sli4_mbox_cmd_free(phba, mbox);
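/*
 * Illustrative sketch, not driver code: the non-embedded RQ_CREATE v2
 * request above carries one DMA address pair per page for every header
 * and data queue, and the port returns one base queue ID with the
 * header/data pairs interleaved. The sketch_* names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_dma_address { uint32_t lo, hi; };	/* struct dma_address analog */

static size_t sketch_mrq_cmd_len(size_t base_len, int numrq, int pages_per_q)
{
	/* 2 * numrq queues (header + data), each with pages_per_q pages */
	return base_len + (size_t)(2 * numrq * pages_per_q) *
			  sizeof(struct sketch_dma_address);
}

static void sketch_assign_mrq_ids(uint16_t base_id, int idx,
				  uint16_t *hrq_id, uint16_t *drq_id)
{
	*hrq_id = base_id + (uint16_t)(2 * idx);	/* header RQ */
	*drq_id = base_id + (uint16_t)(2 * idx) + 1;	/* data RQ */
}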
15895 * lpfc_eq_destroy - Destroy an event Queue on the HBA
15896 * @eq: The queue structure associated with the queue to destroy.
15898 * This function destroys a queue, as detailed in @eq, by sending a mailbox
15899 * command, specific to the type of queue, to the HBA.
15901 * The @eq struct is used to get the queue ID of the queue to destroy.
15903 * On success this function will return a zero. If the queue destroy mailbox
15904 * command fails this function will return -ENXIO.
15907 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15909 LPFC_MBOXQ_t *mbox;
15910 int rc, length, status = 0;
15911 uint32_t shdr_status, shdr_add_status;
15912 union lpfc_sli4_cfg_shdr *shdr;
15914 /* sanity check on queue memory */
15917 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15920 length = (sizeof(struct lpfc_mbx_eq_destroy) -
15921 sizeof(struct lpfc_sli4_cfg_mhdr));
15922 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15923 LPFC_MBOX_OPCODE_EQ_DESTROY,
15924 length, LPFC_SLI4_MBX_EMBED);
15925 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15927 mbox->vport = eq->phba->pport;
15928 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15930 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15931 /* The IOCTL status is embedded in the mailbox subheader. */
15932 shdr = (union lpfc_sli4_cfg_shdr *)
15933 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15934 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15935 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15936 if (shdr_status || shdr_add_status || rc) {
15937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15938 "2505 EQ_DESTROY mailbox failed with "
15939 "status x%x add_status x%x, mbx status x%x\n",
15940 shdr_status, shdr_add_status, rc);
15944 /* Remove eq from any list */
15945 list_del_init(&eq->list);
15946 mempool_free(mbox, eq->phba->mbox_mem_pool);
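/*
 * Illustrative sketch, not driver code: every queue-destroy path above
 * repeats the same success test, combining the polled mailbox return
 * code with the two status words pulled from the cfg subheader.
 */
#include <stdint.h>

static int sketch_mbox_failed(uint32_t shdr_status, uint32_t shdr_add_status,
			      int mbx_rc)
{
	/* any nonzero component means the embedded IOCTL did not succeed */
	return shdr_status || shdr_add_status || mbx_rc;
}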
15951 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
15952 * @cq: The queue structure associated with the queue to destroy.
15954 * This function destroys a queue, as detailed in @cq, by sending a mailbox
15955 * command, specific to the type of queue, to the HBA.
15957 * The @cq struct is used to get the queue ID of the queue to destroy.
15959 * On success this function will return a zero. If the queue destroy mailbox
15960 * command fails this function will return -ENXIO.
15963 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15965 LPFC_MBOXQ_t *mbox;
15966 int rc, length, status = 0;
15967 uint32_t shdr_status, shdr_add_status;
15968 union lpfc_sli4_cfg_shdr *shdr;
15970 /* sanity check on queue memory */
15973 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15976 length = (sizeof(struct lpfc_mbx_cq_destroy) -
15977 sizeof(struct lpfc_sli4_cfg_mhdr));
15978 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15979 LPFC_MBOX_OPCODE_CQ_DESTROY,
15980 length, LPFC_SLI4_MBX_EMBED);
15981 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15983 mbox->vport = cq->phba->pport;
15984 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15985 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15986 /* The IOCTL status is embedded in the mailbox subheader. */
15987 shdr = (union lpfc_sli4_cfg_shdr *)
15988 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
15989 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15990 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15991 if (shdr_status || shdr_add_status || rc) {
15992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15993 "2506 CQ_DESTROY mailbox failed with "
15994 "status x%x add_status x%x, mbx status x%x\n",
15995 shdr_status, shdr_add_status, rc);
15998 /* Remove cq from any list */
15999 list_del_init(&cq->list);
16000 mempool_free(mbox, cq->phba->mbox_mem_pool);
16005 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16006 * @mq: The queue structure associated with the queue to destroy.
16008 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16009 * command, specific to the type of queue, to the HBA.
16011 * The @mq struct is used to get the queue ID of the queue to destroy.
16013 * On success this function will return a zero. If the queue destroy mailbox
16014 * command fails this function will return -ENXIO.
16017 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16019 LPFC_MBOXQ_t *mbox;
16020 int rc, length, status = 0;
16021 uint32_t shdr_status, shdr_add_status;
16022 union lpfc_sli4_cfg_shdr *shdr;
16024 /* sanity check on queue memory */
16027 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16030 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16031 sizeof(struct lpfc_sli4_cfg_mhdr));
16032 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16033 LPFC_MBOX_OPCODE_MQ_DESTROY,
16034 length, LPFC_SLI4_MBX_EMBED);
16035 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16037 mbox->vport = mq->phba->pport;
16038 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16039 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16040 /* The IOCTL status is embedded in the mailbox subheader. */
16041 shdr = (union lpfc_sli4_cfg_shdr *)
16042 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16043 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16044 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16045 if (shdr_status || shdr_add_status || rc) {
16046 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16047 "2507 MQ_DESTROY mailbox failed with "
16048 "status x%x add_status x%x, mbx status x%x\n",
16049 shdr_status, shdr_add_status, rc);
16052 /* Remove mq from any list */
16053 list_del_init(&mq->list);
16054 mempool_free(mbox, mq->phba->mbox_mem_pool);
16059 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16060 * @wq: The queue structure associated with the queue to destroy.
16062 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16063 * command, specific to the type of queue, to the HBA.
16065 * The @wq struct is used to get the queue ID of the queue to destroy.
16067 * On success this function will return a zero. If the queue destroy mailbox
16068 * command fails this function will return -ENXIO.
16071 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16073 LPFC_MBOXQ_t *mbox;
16074 int rc, length, status = 0;
16075 uint32_t shdr_status, shdr_add_status;
16076 union lpfc_sli4_cfg_shdr *shdr;
16078 /* sanity check on queue memory */
16081 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16084 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16085 sizeof(struct lpfc_sli4_cfg_mhdr));
16086 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16087 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16088 length, LPFC_SLI4_MBX_EMBED);
16089 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16091 mbox->vport = wq->phba->pport;
16092 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16093 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16094 shdr = (union lpfc_sli4_cfg_shdr *)
16095 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16096 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16097 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16098 if (shdr_status || shdr_add_status || rc) {
16099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16100 "2508 WQ_DESTROY mailbox failed with "
16101 "status x%x add_status x%x, mbx status x%x\n",
16102 shdr_status, shdr_add_status, rc);
16105 /* Remove wq from any list */
16106 list_del_init(&wq->list);
16109 mempool_free(mbox, wq->phba->mbox_mem_pool);
16114 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16115 * @hrq: The header receive queue to destroy.
16116 * @drq: The data receive queue to destroy.
16117 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
16118 * by sending a mailbox command, specific to the type of queue, to the HBA.
16120 * The @hrq and @drq structs are used to get the queue IDs to destroy.
16122 * On success this function will return a zero. If the queue destroy mailbox
16123 * command fails this function will return -ENXIO.
16126 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16127 struct lpfc_queue *drq)
16129 LPFC_MBOXQ_t *mbox;
16130 int rc, length, status = 0;
16131 uint32_t shdr_status, shdr_add_status;
16132 union lpfc_sli4_cfg_shdr *shdr;
16134 /* sanity check on queue memory */
16137 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16140 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16141 sizeof(struct lpfc_sli4_cfg_mhdr));
16142 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16143 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16144 length, LPFC_SLI4_MBX_EMBED);
16145 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16147 mbox->vport = hrq->phba->pport;
16148 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16149 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16150 /* The IOCTL status is embedded in the mailbox subheader. */
16151 shdr = (union lpfc_sli4_cfg_shdr *)
16152 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16155 if (shdr_status || shdr_add_status || rc) {
16156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16157 "2509 RQ_DESTROY mailbox failed with "
16158 "status x%x add_status x%x, mbx status x%x\n",
16159 shdr_status, shdr_add_status, rc);
16160 if (rc != MBX_TIMEOUT)
16161 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16164 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16166 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16167 shdr = (union lpfc_sli4_cfg_shdr *)
16168 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16169 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16170 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16171 if (shdr_status || shdr_add_status || rc) {
16172 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16173 "2510 RQ_DESTROY mailbox failed with "
16174 "status x%x add_status x%x, mbx status x%x\n",
16175 shdr_status, shdr_add_status, rc);
16178 list_del_init(&hrq->list);
16179 list_del_init(&drq->list);
16180 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16185 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16186 * @phba: pointer to lpfc hba data structure.
16187 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16188 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16189 * @xritag: the xritag that ties this io to the SGL pages.
16191 * This routine will post the sgl pages for the IO that has the xritag
16192 * that is in the iocbq structure. The xritag is assigned during iocbq
16193 * creation and persists for as long as the driver is loaded.
16194 * If the caller has fewer than 256 scatter gather segments to map then
16195 * pdma_phys_addr1 should be 0.
16196 * If the caller needs to map more than 256 scatter gather segments then
16197 * pdma_phys_addr1 should be a valid physical address.
16198 * Physical addresses for SGLs must be 64-byte aligned.
16199 * If you are going to map 2 SGLs then the first one must have 256 entries
16200 * and the second one can have between 1 and 256 entries.
16204 * -ENXIO, -ENOMEM - Failure
16207 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16208 dma_addr_t pdma_phys_addr0,
16209 dma_addr_t pdma_phys_addr1,
16212 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16213 LPFC_MBOXQ_t *mbox;
16215 uint32_t shdr_status, shdr_add_status;
16217 union lpfc_sli4_cfg_shdr *shdr;
16219 if (xritag == NO_XRI) {
16220 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16221 "0364 Invalid param:\n");
16225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16229 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16230 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16231 sizeof(struct lpfc_mbx_post_sgl_pages) -
16232 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16234 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16235 &mbox->u.mqe.un.post_sgl_pages;
16236 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16237 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16239 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16240 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16241 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16242 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16244 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16245 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16246 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16247 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16248 if (!phba->sli4_hba.intr_enable)
16249 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16251 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16252 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16254 /* The IOCTL status is embedded in the mailbox subheader. */
16255 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16256 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16257 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16258 if (rc != MBX_TIMEOUT)
16259 mempool_free(mbox, phba->mbox_mem_pool);
16260 if (shdr_status || shdr_add_status || rc) {
16261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16262 "2511 POST_SGL mailbox failed with "
16263 "status x%x add_status x%x, mbx status x%x\n",
16264 shdr_status, shdr_add_status, rc);
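/*
 * Illustrative sketch, not driver code: posting an SGL page pair splits
 * each 64-bit DMA address into low/high 32-bit words, which is what the
 * putPaddrLow()/putPaddrHigh() calls above do before the cpu_to_le32()
 * conversion.
 */
#include <stdint.h>

static void sketch_split_paddr(uint64_t paddr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(paddr & 0xffffffffu);	/* putPaddrLow() analog */
	*hi = (uint32_t)(paddr >> 32);		/* putPaddrHigh() analog */
	/* the driver additionally byte-swaps to little endian for DMA */
}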
16270 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16271 * @phba: pointer to lpfc hba data structure.
16273 * This routine is invoked to allocate the next available logical xri
16274 * from the driver's xri bitmask, consistent with the SLI-4 interface
16275 * spec, and to account for it in xri_used.
16279 * Returns the allocated logical xri, defined as 0 <= xri < max_xri,
16280 * if successful, or NO_XRI if no xris are available.
16283 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16288 * Fetch the next logical xri. Because this index is logical,
16289 * the driver starts at 0 each time.
16291 spin_lock_irq(&phba->hbalock);
16292 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16293 phba->sli4_hba.max_cfg_param.max_xri, 0);
16294 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16295 spin_unlock_irq(&phba->hbalock);
16298 set_bit(xri, phba->sli4_hba.xri_bmask);
16299 phba->sli4_hba.max_cfg_param.xri_used++;
16301 spin_unlock_irq(&phba->hbalock);
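/*
 * Illustrative sketch, not driver code: a userspace model of the bitmap
 * allocator above (find_next_zero_bit()/set_bit() analogs), minus the
 * hbalock. SKETCH_MAX_XRI is a made-up bound.
 */
#include <stdint.h>

#define SKETCH_MAX_XRI 64
#define SKETCH_NO_XRI  0xffffu

static uint16_t sketch_alloc_xri(uint64_t *bmask, unsigned int *used)
{
	unsigned int xri;

	for (xri = 0; xri < SKETCH_MAX_XRI; xri++) {
		if (!(*bmask & (1ULL << xri))) {	/* first zero bit */
			*bmask |= 1ULL << xri;		/* set_bit() analog */
			(*used)++;
			return (uint16_t)xri;		/* logical index */
		}
	}
	return SKETCH_NO_XRI;				/* pool exhausted */
}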
16306 * __lpfc_sli4_free_xri - Release an xri for reuse.
16307 * @phba: pointer to lpfc hba data structure.
16309 * This routine is invoked to release an xri to the pool of
16310 * available xris maintained by the driver. The hbalock must be held.
16313 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16315 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16316 phba->sli4_hba.max_cfg_param.xri_used--;
16321 * lpfc_sli4_free_xri - Release an xri for reuse.
16322 * @phba: pointer to lpfc hba data structure.
16324 * This routine is invoked to release an xri to the pool of
16325 * available xris maintained by the driver.
16328 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16330 spin_lock_irq(&phba->hbalock);
16331 __lpfc_sli4_free_xri(phba, xri);
16332 spin_unlock_irq(&phba->hbalock);
16336 * lpfc_sli4_next_xritag - Get an xritag for the io
16337 * @phba: Pointer to HBA context object.
16339 * This function gets an xritag for the iocb. If there is no unused xritag
16340 * it will return NO_XRI (0xffff).
16341 * Otherwise it returns the allocated xritag.
16343 * The caller is not required to hold any lock.
16346 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16348 uint16_t xri_index;
16350 xri_index = lpfc_sli4_alloc_xri(phba);
16351 if (xri_index == NO_XRI)
16352 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16353 "2004 Failed to allocate XRI.last XRITAG is %d"
16354 " Max XRI is %d, Used XRI is %d\n",
16356 phba->sli4_hba.max_cfg_param.max_xri,
16357 phba->sli4_hba.max_cfg_param.xri_used);
16362 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16363 * @phba: pointer to lpfc hba data structure.
16364 * @post_sgl_list: pointer to els sgl entry list.
16365 * @count: number of els sgl entries on the list.
16367 * This routine is invoked to post a block of the driver's sgl pages to the
16368 * HBA using a non-embedded mailbox command. No lock is held. This routine
16369 * is only called when the driver is loading and after all IO has been
16370 * stopped.
16373 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16374 struct list_head *post_sgl_list,
16377 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16378 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16379 struct sgl_page_pairs *sgl_pg_pairs;
16381 LPFC_MBOXQ_t *mbox;
16382 uint32_t reqlen, alloclen, pg_pairs;
16384 uint16_t xritag_start = 0;
16386 uint32_t shdr_status, shdr_add_status;
16387 union lpfc_sli4_cfg_shdr *shdr;
16389 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16390 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16391 if (reqlen > SLI4_PAGE_SIZE) {
16392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16393 "2559 Block sgl registration required DMA "
16394 "size (%d) great than a page\n", reqlen);
16398 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16402 /* Allocate DMA memory and set up the non-embedded mailbox command */
16403 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16404 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16405 LPFC_SLI4_MBX_NEMBED);
16407 if (alloclen < reqlen) {
16408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16409 "0285 Allocated DMA memory size (%d) is "
16410 "less than the requested DMA memory "
16411 "size (%d)\n", alloclen, reqlen);
16412 lpfc_sli4_mbox_cmd_free(phba, mbox);
16415 /* Set up the SGL pages in the non-embedded DMA pages */
16416 viraddr = mbox->sge_array->addr[0];
16417 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16418 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16421 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16422 /* Set up the sge entry */
16423 sgl_pg_pairs->sgl_pg0_addr_lo =
16424 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16425 sgl_pg_pairs->sgl_pg0_addr_hi =
16426 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16427 sgl_pg_pairs->sgl_pg1_addr_lo =
16428 cpu_to_le32(putPaddrLow(0));
16429 sgl_pg_pairs->sgl_pg1_addr_hi =
16430 cpu_to_le32(putPaddrHigh(0));
16432 /* Keep the first xritag on the list */
16434 xritag_start = sglq_entry->sli4_xritag;
16439 /* Complete initialization and perform endian conversion. */
16440 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16441 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16442 sgl->word0 = cpu_to_le32(sgl->word0);
16444 if (!phba->sli4_hba.intr_enable)
16445 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16447 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16448 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16450 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16451 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16452 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16453 if (rc != MBX_TIMEOUT)
16454 lpfc_sli4_mbox_cmd_free(phba, mbox);
16455 if (shdr_status || shdr_add_status || rc) {
16456 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16457 "2513 POST_SGL_BLOCK mailbox command failed "
16458 "status x%x add_status x%x mbx status x%x\n",
16459 shdr_status, shdr_add_status, rc);
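/*
 * Illustrative sketch, not driver code: the length guard above. One
 * page-pair record per SGL plus the cfg subheader and one pad word must
 * fit in a single 4KB mailbox page or the block post is rejected.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_SLI4_PAGE 4096

static int sketch_sgl_block_fits(size_t count, size_t pg_pair_sz,
				 size_t shdr_sz)
{
	size_t reqlen = count * pg_pair_sz + shdr_sz + sizeof(uint32_t);

	return reqlen <= SKETCH_SLI4_PAGE;
}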
16466 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
16467 * @phba: pointer to lpfc hba data structure.
16468 * @sblist: pointer to scsi buffer list.
16469 * @count: number of scsi buffers on the list.
16471 * This routine is invoked to post a block of @count scsi sgl pages from a
16472 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
16477 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
16478 struct list_head *sblist,
16481 struct lpfc_scsi_buf *psb;
16482 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16483 struct sgl_page_pairs *sgl_pg_pairs;
16485 LPFC_MBOXQ_t *mbox;
16486 uint32_t reqlen, alloclen, pg_pairs;
16488 uint16_t xritag_start = 0;
16490 uint32_t shdr_status, shdr_add_status;
16491 dma_addr_t pdma_phys_bpl1;
16492 union lpfc_sli4_cfg_shdr *shdr;
16494 /* Calculate the requested length of the dma memory */
16495 reqlen = count * sizeof(struct sgl_page_pairs) +
16496 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16497 if (reqlen > SLI4_PAGE_SIZE) {
16498 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16499 "0217 Block sgl registration required DMA "
16500 "size (%d) great than a page\n", reqlen);
16503 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16506 "0283 Failed to allocate mbox cmd memory\n");
16510 /* Allocate DMA memory and set up the non-embedded mailbox command */
16511 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16512 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16513 LPFC_SLI4_MBX_NEMBED);
16515 if (alloclen < reqlen) {
16516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16517 "2561 Allocated DMA memory size (%d) is "
16518 "less than the requested DMA memory "
16519 "size (%d)\n", alloclen, reqlen);
16520 lpfc_sli4_mbox_cmd_free(phba, mbox);
16524 /* Get the first SGE entry from the non-embedded DMA memory */
16525 viraddr = mbox->sge_array->addr[0];
16527 /* Set up the SGL pages in the non-embedded DMA pages */
16528 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16529 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16532 list_for_each_entry(psb, sblist, list) {
16533 /* Set up the sge entry */
16534 sgl_pg_pairs->sgl_pg0_addr_lo =
16535 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
16536 sgl_pg_pairs->sgl_pg0_addr_hi =
16537 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16538 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16539 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16541 pdma_phys_bpl1 = 0;
16542 sgl_pg_pairs->sgl_pg1_addr_lo =
16543 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16544 sgl_pg_pairs->sgl_pg1_addr_hi =
16545 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16546 /* Keep the first xritag on the list */
16548 xritag_start = psb->cur_iocbq.sli4_xritag;
16552 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16553 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16554 /* Perform endian conversion if necessary */
16555 sgl->word0 = cpu_to_le32(sgl->word0);
16557 if (!phba->sli4_hba.intr_enable)
16558 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16560 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16561 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16563 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16564 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16565 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16566 if (rc != MBX_TIMEOUT)
16567 lpfc_sli4_mbox_cmd_free(phba, mbox);
16568 if (shdr_status || shdr_add_status || rc) {
16569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16570 "2564 POST_SGL_BLOCK mailbox command failed "
16571 "status x%x add_status x%x mbx status x%x\n",
16572 shdr_status, shdr_add_status, rc);
16579 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16580 * @phba: pointer to lpfc_hba struct that the frame was received on
16581 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16583 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16584 * valid type of frame that the LPFC driver will handle. This function will
16585 * return a zero if the frame is a valid frame or a non-zero value when the
16586 * frame does not pass the check.
16589 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16592 struct fc_vft_header *fc_vft_hdr;
16593 uint32_t *header = (uint32_t *) fc_hdr;
16595 #define FC_RCTL_MDS_DIAGS 0xF4
16597 switch (fc_hdr->fh_r_ctl) {
16598 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16599 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16600 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16601 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16602 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16603 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16604 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16605 case FC_RCTL_DD_CMD_STATUS: /* command status */
16606 case FC_RCTL_ELS_REQ: /* extended link services request */
16607 case FC_RCTL_ELS_REP: /* extended link services reply */
16608 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16609 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16610 case FC_RCTL_BA_NOP: /* basic link service NOP */
16611 case FC_RCTL_BA_ABTS: /* basic link service abort */
16612 case FC_RCTL_BA_RMC: /* remove connection */
16613 case FC_RCTL_BA_ACC: /* basic accept */
16614 case FC_RCTL_BA_RJT: /* basic reject */
16615 case FC_RCTL_BA_PRMT:
16616 case FC_RCTL_ACK_1: /* acknowledge_1 */
16617 case FC_RCTL_ACK_0: /* acknowledge_0 */
16618 case FC_RCTL_P_RJT: /* port reject */
16619 case FC_RCTL_F_RJT: /* fabric reject */
16620 case FC_RCTL_P_BSY: /* port busy */
16621 case FC_RCTL_F_BSY: /* fabric busy to data frame */
16622 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
16623 case FC_RCTL_LCR: /* link credit reset */
16624 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16625 case FC_RCTL_END: /* end */
16627 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
16628 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16629 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16630 return lpfc_fc_frame_check(phba, fc_hdr);
16635 #define FC_TYPE_VENDOR_UNIQUE 0xFF
16637 switch (fc_hdr->fh_type) {
16643 case FC_TYPE_VENDOR_UNIQUE:
16651 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16652 "2538 Received frame rctl:x%x, type:x%x, "
16653 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16654 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16655 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16656 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16657 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16658 be32_to_cpu(header[6]));
16661 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16662 "2539 Dropped frame rctl:x%x type:x%x\n",
16663 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
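/*
 * Illustrative sketch, not driver code: the frame filter above in
 * miniature. Accept a known R_CTL, unwrap a VFT header and re-check the
 * encapsulated frame, and drop everything else. Only a few of the
 * standard R_CTL values are shown.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_RCTL_ELS_REQ 0x22	/* FC_RCTL_ELS_REQ */
#define SKETCH_RCTL_BA_ABTS 0x81	/* FC_RCTL_BA_ABTS */
#define SKETCH_RCTL_VFTH    0x50	/* FC_RCTL_VFTH */

static bool sketch_rctl_ok(uint8_t r_ctl, const uint8_t *inner_r_ctl)
{
	switch (r_ctl) {
	case SKETCH_RCTL_ELS_REQ:
	case SKETCH_RCTL_BA_ABTS:
		return true;
	case SKETCH_RCTL_VFTH:
		/* tagged frame: validate the header that follows the tag */
		return inner_r_ctl && sketch_rctl_ok(*inner_r_ctl, NULL);
	default:
		return false;	/* dropped, as in the "2539" log above */
	}
}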
16668 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16669 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16671 * This function processes the FC header to retrieve the VFI from the VFT
16672 * header, if one exists. This function will return the VFI if one exists
16673 * or 0 if no VFT header exists.
16676 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16678 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16680 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16682 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16686 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
16687 * @phba: Pointer to the HBA structure to search for the vport on
16688 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16689 * @fcfi: The FC Fabric ID that the frame came from
16691 * This function searches the @phba for a vport that matches the content of the
16692 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
16693 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
16694 * returns the matching vport pointer or NULL if unable to match frame to a
16697 static struct lpfc_vport *
16698 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
16699 uint16_t fcfi, uint32_t did)
16701 struct lpfc_vport **vports;
16702 struct lpfc_vport *vport = NULL;
16705 if (did == Fabric_DID)
16706 return phba->pport;
16707 if ((phba->pport->fc_flag & FC_PT2PT) &&
16708 !(phba->link_state == LPFC_HBA_READY))
16709 return phba->pport;
16711 vports = lpfc_create_vport_work_array(phba);
16712 if (vports != NULL) {
16713 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16714 if (phba->fcf.fcfi == fcfi &&
16715 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16716 vports[i]->fc_myDID == did) {
16722 lpfc_destroy_vport_work_array(phba, vports);
16727 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
16728 * @vport: The vport to work on.
16730 * This function updates the receive sequence time stamp for this vport. The
16731 * receive sequence time stamp indicates the time that the last frame of the
16732 * sequence that has been idle for the longest amount of time was received.
16733 * The driver uses this time stamp to determine if any received sequences have
16734 * timed out.
16737 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16739 struct lpfc_dmabuf *h_buf;
16740 struct hbq_dmabuf *dmabuf = NULL;
16742 /* get the oldest sequence on the rcv list */
16743 h_buf = list_get_first(&vport->rcv_buffer_list,
16744 struct lpfc_dmabuf, list);
16747 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16748 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16752 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
16753 * @vport: The vport that the received sequences were sent to.
16755 * This function cleans up all outstanding received sequences. This is called
16756 * by the driver when a link event or user action invalidates all the received
16760 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
16762 struct lpfc_dmabuf *h_buf, *hnext;
16763 struct lpfc_dmabuf *d_buf, *dnext;
16764 struct hbq_dmabuf *dmabuf = NULL;
16766 /* start with the oldest sequence on the rcv list */
16767 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16768 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16769 list_del_init(&dmabuf->hbuf.list);
16770 list_for_each_entry_safe(d_buf, dnext,
16771 &dmabuf->dbuf.list, list) {
16772 list_del_init(&d_buf->list);
16773 lpfc_in_buf_free(vport->phba, d_buf);
16775 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16780 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
16781 * @vport: The vport that the received sequences were sent to.
16783 * This function determines whether any received sequences have timed out by
16784 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
16785 * indicates that there is at least one timed out sequence this routine will
16786 * go through the received sequences one at a time from most inactive to most
16787 * active to determine which ones need to be cleaned up. Once it has determined
16788 * that a sequence needs to be cleaned up it will simply free up the resources
16789 * without sending an abort.
16792 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
16794 struct lpfc_dmabuf *h_buf, *hnext;
16795 struct lpfc_dmabuf *d_buf, *dnext;
16796 struct hbq_dmabuf *dmabuf = NULL;
16797 unsigned long timeout;
16798 int abort_count = 0;
16800 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16801 vport->rcv_buffer_time_stamp);
16802 if (list_empty(&vport->rcv_buffer_list) ||
16803 time_before(jiffies, timeout))
16805 /* start with the oldest sequence on the rcv list */
16806 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16807 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16808 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16809 dmabuf->time_stamp);
16810 if (time_before(jiffies, timeout))
16813 list_del_init(&dmabuf->hbuf.list);
16814 list_for_each_entry_safe(d_buf, dnext,
16815 &dmabuf->dbuf.list, list) {
16816 list_del_init(&d_buf->list);
16817 lpfc_in_buf_free(vport->phba, d_buf);
16819 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16822 lpfc_update_rcv_time_stamp(vport);
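/*
 * Illustrative sketch, not driver code: the E_D_TOV test above in
 * userspace terms. With unsigned arithmetic the comparison stays correct
 * across counter wrap, which is what the jiffies time_before() check
 * relies on.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_seq_timed_out(uint64_t now_ms, uint64_t stamp_ms,
				 uint64_t edtov_ms)
{
	return now_ms - stamp_ms >= edtov_ms;	/* wrap-safe, like jiffies */
}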
16826 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
16827 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
16829 * This function searches through the existing incomplete sequences that have
16830 * been sent to this @vport. If the frame matches one of the incomplete
16831 * sequences then the dbuf in the @dmabuf is added to the list of frames that
16832 * make up that sequence. If no sequence is found that matches this frame then
16833 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
16834 * This function returns a pointer to the first dmabuf in the sequence list that
16835 * the frame was linked to.
16837 static struct hbq_dmabuf *
16838 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16840 struct fc_frame_header *new_hdr;
16841 struct fc_frame_header *temp_hdr;
16842 struct lpfc_dmabuf *d_buf;
16843 struct lpfc_dmabuf *h_buf;
16844 struct hbq_dmabuf *seq_dmabuf = NULL;
16845 struct hbq_dmabuf *temp_dmabuf = NULL;
16848 INIT_LIST_HEAD(&dmabuf->dbuf.list);
16849 dmabuf->time_stamp = jiffies;
16850 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16852 /* Use the hdr_buf to find the sequence that this frame belongs to */
16853 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16854 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16855 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16856 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16857 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16859 /* found a pending sequence that matches this frame */
16860 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16865 * This indicates the first frame received for this sequence.
16866 * Queue the buffer on the vport's rcv_buffer_list.
16868 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16869 lpfc_update_rcv_time_stamp(vport);
16872 temp_hdr = seq_dmabuf->hbuf.virt;
16873 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
16874 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16875 list_del_init(&seq_dmabuf->hbuf.list);
16876 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16877 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16878 lpfc_update_rcv_time_stamp(vport);
16881 /* move this sequence to the tail to indicate a young sequence */
16882 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
16883 seq_dmabuf->time_stamp = jiffies;
16884 lpfc_update_rcv_time_stamp(vport);
16885 if (list_empty(&seq_dmabuf->dbuf.list)) {
16886 temp_hdr = dmabuf->hbuf.virt;
16887 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16890 /* find the correct place in the sequence to insert this frame */
16891 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
16893 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16894 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
16896 * If the frame's sequence count is greater than the frame on
16897 * the list then insert the frame right after this frame
16899 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
16900 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16901 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
16906 if (&d_buf->list == &seq_dmabuf->dbuf.list)
16908 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
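/*
 * Illustrative sketch, not driver code: frames in a sequence are kept in
 * ascending SEQ_CNT order by walking backwards from the tail and
 * inserting after the first smaller entry, as above. A minimal model
 * over a sorted array instead of the driver's linked list.
 */
#include <stdint.h>
#include <string.h>

/* insert v into ascending sorted[len]; returns the new length */
static int sketch_insert_seq_cnt(uint16_t *sorted, int len, uint16_t v)
{
	int i = len;

	while (i > 0 && sorted[i - 1] > v)	/* scan from the tail */
		i--;
	memmove(&sorted[i + 1], &sorted[i], (size_t)(len - i) * sizeof(*sorted));
	sorted[i] = v;
	return len + 1;
}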
16917 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
16918 * @vport: pointer to a virtual port
16919 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16921 * This function tries to abort the partially assembled sequence described
16922 * by the information in the basic abort @dmabuf. It checks to see whether such
16923 * a partially assembled sequence is held by the driver. If so, it frees up all
16924 * the frames from the partially assembled sequence.
16927 * true -- if a matching partially assembled sequence is present and all
16928 * the frames were freed with the sequence;
16929 * false -- if there is no matching partially assembled sequence present so
16930 * nothing was aborted in the lower layer driver
16933 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
16934 struct hbq_dmabuf *dmabuf)
16936 struct fc_frame_header *new_hdr;
16937 struct fc_frame_header *temp_hdr;
16938 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
16939 struct hbq_dmabuf *seq_dmabuf = NULL;
16941 /* Use the hdr_buf to find the sequence that matches this frame */
16942 INIT_LIST_HEAD(&dmabuf->dbuf.list);
16943 INIT_LIST_HEAD(&dmabuf->hbuf.list);
16944 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16945 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16946 temp_hdr = (struct fc_frame_header *)h_buf->virt;
16947 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16948 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16949 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16951 /* found a pending sequence that matches this frame */
16952 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16956 /* Free up all the frames from the partially assembled sequence */
16958 list_for_each_entry_safe(d_buf, n_buf,
16959 &seq_dmabuf->dbuf.list, list) {
16960 list_del_init(&d_buf->list);
16961 lpfc_in_buf_free(vport->phba, d_buf);
16969 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
16970 * @vport: pointer to a virtual port
16971 * @dmabuf: pointer to a dmabuf that describes the FC sequence
16973 * This function tries to abort the sequence assembled at the upper level
16974 * protocol, described by the information in the basic abort @dmabuf. It
16975 * checks to see whether such a pending context exists at the upper level
16976 * protocol. If so, it cleans up the pending context.
16979 * true -- if a matching pending context of the sequence was cleaned
16981 * false -- if no matching pending context of the sequence is present
16985 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16987 struct lpfc_hba *phba = vport->phba;
16990 /* Accepting abort at ulp with SLI4 only */
16991 if (phba->sli_rev < LPFC_SLI_REV4)
16994 /* Give all interested upper level protocols a chance to handle the abort */
16995 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17003 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17004 * @phba: Pointer to HBA context object.
17005 * @cmd_iocbq: pointer to the command iocbq structure.
17006 * @rsp_iocbq: pointer to the response iocbq structure.
17008 * This function handles the sequence abort response iocb command complete
17009 * event. It properly releases the memory allocated to the sequence abort
17010 * accept iocb.
17013 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17014 struct lpfc_iocbq *cmd_iocbq,
17015 struct lpfc_iocbq *rsp_iocbq)
17017 struct lpfc_nodelist *ndlp;
17020 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17021 lpfc_nlp_put(ndlp);
17022 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17025 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17026 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17027 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17028 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17029 rsp_iocbq->iocb.ulpStatus,
17030 rsp_iocbq->iocb.un.ulpWord[4]);
17034 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17035 * @phba: Pointer to HBA context object.
17036 * @xri: xri id in transaction.
17038 * This function validates that the xri maps to the known range of XRIs allocated and
17039 * used by the driver.
17042 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17047 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17048 if (xri == phba->sli4_hba.xri_ids[i])
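/*
 * Illustrative sketch (not compiled): lpfc_sli4_xri_inrange() above is a
 * linear reverse lookup from a physical xri on the wire to the driver's
 * logical index. The same search in isolation; the helper name is
 * hypothetical:
 */
#if 0
static uint16_t xri_to_logical(const uint16_t *xri_ids, uint16_t count,
			       uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < count; i++)
		if (xri_ids[i] == xri)
			return i;	/* logical xri index */
	return NO_XRI;			/* xri is not driver-owned */
}
#endif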
17055 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17056 * @vport: Pointer to the virtual port the abort was received on.
17057 * @fc_hdr: pointer to a FC frame header.
17058 * @aborted: true if the partially assembled sequence was aborted, else false.
17059 * This function sends a basic response to a previous unsol sequence abort
17060 * event after aborting the sequence handling.
17063 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17064 struct fc_frame_header *fc_hdr, bool aborted)
17066 struct lpfc_hba *phba = vport->phba;
17067 struct lpfc_iocbq *ctiocb = NULL;
17068 struct lpfc_nodelist *ndlp;
17069 uint16_t oxid, rxid, xri, lxri;
17070 uint32_t sid, fctl;
17074 if (!lpfc_is_link_up(phba))
17077 sid = sli4_sid_from_fc_hdr(fc_hdr);
17078 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17079 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17081 ndlp = lpfc_findnode_did(vport, sid);
17083 ndlp = lpfc_nlp_init(vport, sid);
17085 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17086 "1268 Failed to allocate ndlp for "
17087 "oxid:x%x SID:x%x\n", oxid, sid);
17090 /* Put ndlp onto pport node list */
17091 lpfc_enqueue_node(vport, ndlp);
17092 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17093 /* re-setup ndlp without removing from node list */
17094 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17096 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17097 "3275 Failed to active ndlp found "
17098 "for oxid:x%x SID:x%x\n", oxid, sid);
17103 /* Allocate buffer for rsp iocb */
17104 ctiocb = lpfc_sli_get_iocbq(phba);
17108 /* Extract the F_CTL field from FC_HDR */
17109 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17111 icmd = &ctiocb->iocb;
17112 icmd->un.xseq64.bdl.bdeSize = 0;
17113 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17114 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17115 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17116 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17118 /* Fill in the rest of iocb fields */
17119 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17120 icmd->ulpBdeCount = 0;
17122 icmd->ulpClass = CLASS3;
17123 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17124 ctiocb->context1 = lpfc_nlp_get(ndlp);
17126 ctiocb->iocb_cmpl = NULL;
17127 ctiocb->vport = phba->pport;
17128 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17129 ctiocb->sli4_lxritag = NO_XRI;
17130 ctiocb->sli4_xritag = NO_XRI;
17132 if (fctl & FC_FC_EX_CTX)
17133 /* Exchange responder sent the abort so we
17139 lxri = lpfc_sli4_xri_inrange(phba, xri);
17140 if (lxri != NO_XRI)
17141 lpfc_set_rrq_active(phba, ndlp, lxri,
17142 (xri == oxid) ? rxid : oxid, 0);
17143 /* For BA_ABTS from exchange responder, if the logical xri with
17144 * the oxid maps to the FCP XRI range, the port no longer has
17145 * that exchange context, send a BLS_RJT. Override the IOCB for
17148 if ((fctl & FC_FC_EX_CTX) &&
17149 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17150 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17151 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17152 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17153 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17156 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17157 * the driver no longer has that exchange, send a BLS_RJT. Override
17158 * the IOCB for a BA_RJT.
17160 if (!aborted) {
17161 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17162 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17163 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17164 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17167 if (fctl & FC_FC_EX_CTX) {
17168 /* ABTS sent by responder to CT exchange, construction
17169 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17170 * field and RX_ID from ABTS for RX_ID field.
17172 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17174 /* ABTS sent by initiator to CT exchange, construction
17175 * of BA_ACC will need to allocate a new XRI as for the
17178 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17180 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17181 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17183 /* Xmit CT abts response on exchange <xid> */
17184 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17185 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17186 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17188 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17189 if (rc == IOCB_ERROR) {
17190 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17191 "2925 Failed to issue CT ABTS RSP x%x on "
17192 "xri x%x, Data x%x\n",
17193 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17195 lpfc_nlp_put(ndlp);
17196 ctiocb->context1 = NULL;
17197 lpfc_sli_release_iocbq(phba, ctiocb);
17202 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17203 * @vport: Pointer to the vport on which this sequence was received
17204 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17206 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17207 * receive sequence is only partially assembled by the driver, it shall abort
17208 * the partially assembled frames for the sequence. Otherwise, if the
17209 * unsolicited receive sequence has been completely assembled and passed to
17210 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
17211 * unsolicited sequence as aborted. After that, it will issue a basic
17212 * accept to accept the abort.
17215 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17216 struct hbq_dmabuf *dmabuf)
17218 struct lpfc_hba *phba = vport->phba;
17219 struct fc_frame_header fc_hdr;
17223 /* Make a copy of fc_hdr before the dmabuf being released */
17224 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17225 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17227 if (fctl & FC_FC_EX_CTX) {
17228 /* ABTS by responder to exchange, no cleanup needed */
17231 /* ABTS by initiator to exchange, need to do cleanup */
17232 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17233 if (!aborted)
17234 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17236 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17238 if (phba->nvmet_support) {
17239 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17243 /* Respond with BA_ACC or BA_RJT accordingly */
17244 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17248 * lpfc_seq_complete - Indicates if a sequence is complete
17249 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17251 * This function checks the sequence, starting with the frame described by
17252 * @dmabuf, to see if all the frames associated with this sequence are present.
17253 * The frames associated with this sequence are linked to the @dmabuf using the
17254 * dbuf list. This function checks three things: 1) that the first frame
17255 * has a sequence count of zero; 2) that a frame has the last-frame-of-sequence
17256 * bit set; 3) that there are no holes in the sequence count. The function will
17257 * return 1 when the sequence is complete, otherwise it will return 0.
17260 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17262 struct fc_frame_header *hdr;
17263 struct lpfc_dmabuf *d_buf;
17264 struct hbq_dmabuf *seq_dmabuf;
17268 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17269 /* make sure first frame of sequence has a sequence count of zero */
17270 if (hdr->fh_seq_cnt != seq_count)
17272 fctl = (hdr->fh_f_ctl[0] << 16 |
17273 hdr->fh_f_ctl[1] << 8 |
17275 /* If last frame of sequence we can return success. */
17276 if (fctl & FC_FC_END_SEQ)
17278 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17279 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17280 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17281 /* If there is a hole in the sequence count then fail. */
17282 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17284 fctl = (hdr->fh_f_ctl[0] << 16 |
17285 hdr->fh_f_ctl[1] << 8 |
17287 /* If last frame of sequence we can return success. */
17288 if (fctl & FC_FC_END_SEQ)
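/*
 * Illustrative sketch (not compiled): F_CTL is a 24-bit big-endian field
 * stored as three bytes in the FC header; the code above reassembles it
 * before testing FC_FC_END_SEQ. The reassembly in isolation; the helper
 * name is hypothetical:
 */
#if 0
static uint32_t fc_hdr_fctl(const struct fc_frame_header *hdr)
{
	/* fh_f_ctl[0] is the most significant byte on the wire */
	return (hdr->fh_f_ctl[0] << 16) |
	       (hdr->fh_f_ctl[1] << 8) |
	       hdr->fh_f_ctl[2];
}
/* Usage: the sequence is complete once a frame carries the end bit:
 *	if (fc_hdr_fctl(hdr) & FC_FC_END_SEQ)
 *		return 1;
 */
#endif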
17295 * lpfc_prep_seq - Prep sequence for ULP processing
17296 * @vport: Pointer to the vport on which this sequence was received
17297 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17299 * This function takes a sequence, described by a list of frames, and creates
17300 * a list of iocbq structures to describe the sequence. This iocbq list will be
17301 * used to issue to the generic unsolicited sequence handler. This routine
17302 * returns a pointer to the first iocbq in the list. If the function is unable
17303 * to allocate an iocbq, it throws out the received frames that could not
17304 * be described and returns a pointer to the first iocbq. If unable to
17305 * allocate any iocbqs (including the first) this function will return NULL.
17307 static struct lpfc_iocbq *
17308 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17310 struct hbq_dmabuf *hbq_buf;
17311 struct lpfc_dmabuf *d_buf, *n_buf;
17312 struct lpfc_iocbq *first_iocbq, *iocbq;
17313 struct fc_frame_header *fc_hdr;
17315 uint32_t len, tot_len;
17316 struct ulp_bde64 *pbde;
17318 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17319 /* remove from receive buffer list */
17320 list_del_init(&seq_dmabuf->hbuf.list);
17321 lpfc_update_rcv_time_stamp(vport);
17322 /* get the Remote Port's SID */
17323 sid = sli4_sid_from_fc_hdr(fc_hdr);
17325 /* Get an iocbq struct to fill in. */
17326 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17328 /* Initialize the first IOCB. */
17329 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17330 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17331 first_iocbq->vport = vport;
17333 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17334 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17335 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17336 first_iocbq->iocb.un.rcvels.parmRo =
17337 sli4_did_from_fc_hdr(fc_hdr);
17338 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17340 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17341 first_iocbq->iocb.ulpContext = NO_XRI;
17342 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17343 be16_to_cpu(fc_hdr->fh_ox_id);
17344 /* iocbq is prepped for internal consumption. Physical vpi. */
17345 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17346 vport->phba->vpi_ids[vport->vpi];
17347 /* put the first buffer into the first IOCBq */
17348 tot_len = bf_get(lpfc_rcqe_length,
17349 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17351 first_iocbq->context2 = &seq_dmabuf->dbuf;
17352 first_iocbq->context3 = NULL;
17353 first_iocbq->iocb.ulpBdeCount = 1;
17354 if (tot_len > LPFC_DATA_BUF_SIZE)
17355 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17356 LPFC_DATA_BUF_SIZE;
17358 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17360 first_iocbq->iocb.un.rcvels.remoteID = sid;
17362 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17364 iocbq = first_iocbq;
17366 * Each IOCBq can have two buffers assigned, so go through the list
17367 * of buffers for this sequence and save two buffers in each IOCBq
17369 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17371 lpfc_in_buf_free(vport->phba, d_buf);
17374 if (!iocbq->context3) {
17375 iocbq->context3 = d_buf;
17376 iocbq->iocb.ulpBdeCount++;
17377 /* We need to get the size out of the right CQE */
17378 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17379 len = bf_get(lpfc_rcqe_length,
17380 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17381 pbde = (struct ulp_bde64 *)
17382 &iocbq->iocb.unsli3.sli3Words[4];
17383 if (len > LPFC_DATA_BUF_SIZE)
17384 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17386 pbde->tus.f.bdeSize = len;
17388 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17391 iocbq = lpfc_sli_get_iocbq(vport->phba);
17394 first_iocbq->iocb.ulpStatus =
17395 IOSTAT_FCP_RSP_ERROR;
17396 first_iocbq->iocb.un.ulpWord[4] =
17397 IOERR_NO_RESOURCES;
17399 lpfc_in_buf_free(vport->phba, d_buf);
17402 /* We need to get the size out of the right CQE */
17403 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17404 len = bf_get(lpfc_rcqe_length,
17405 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17406 iocbq->context2 = d_buf;
17407 iocbq->context3 = NULL;
17408 iocbq->iocb.ulpBdeCount = 1;
17409 if (len > LPFC_DATA_BUF_SIZE)
17410 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17411 LPFC_DATA_BUF_SIZE;
17413 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17416 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17418 iocbq->iocb.un.rcvels.remoteID = sid;
17419 list_add_tail(&iocbq->list, &first_iocbq->list);
17422 /* Free the sequence's header buffer */
17424 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
17426 return first_iocbq;
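/*
 * Illustrative sketch (not compiled): throughout lpfc_prep_seq() the
 * per-buffer BDE size is the CQE-reported length clamped to
 * LPFC_DATA_BUF_SIZE, while acc_len accumulates the unclamped lengths.
 * The clamp in isolation; the helper name is hypothetical:
 */
#if 0
static uint32_t lpfc_rcv_bde_size(uint32_t cqe_len)
{
	/* One BDE describes at most one receive buffer's worth of data */
	return cqe_len > LPFC_DATA_BUF_SIZE ? LPFC_DATA_BUF_SIZE : cqe_len;
}
#endif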
17430 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17431 struct hbq_dmabuf *seq_dmabuf)
17433 struct fc_frame_header *fc_hdr;
17434 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17435 struct lpfc_hba *phba = vport->phba;
17437 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17438 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17440 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17441 "2707 Ring %d handler: Failed to allocate "
17442 "iocb Rctl x%x Type x%x received\n",
17444 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17447 if (!lpfc_complete_unsol_iocb(phba,
17448 phba->sli4_hba.els_wq->pring,
17449 iocbq, fc_hdr->fh_r_ctl,
17451 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17452 "2540 Ring %d handler: unexpected Rctl "
17453 "x%x Type x%x received\n",
17455 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17457 /* Free iocb created in lpfc_prep_seq */
17458 list_for_each_entry_safe(curr_iocb, next_iocb,
17459 &iocbq->list, list) {
17460 list_del_init(&curr_iocb->list);
17461 lpfc_sli_release_iocbq(phba, curr_iocb);
17463 lpfc_sli_release_iocbq(phba, iocbq);
17467 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17468 struct lpfc_iocbq *rspiocb)
17470 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17472 if (pcmd && pcmd->virt)
17473 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17475 lpfc_sli_release_iocbq(phba, cmdiocb);
17479 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17480 struct hbq_dmabuf *dmabuf)
17482 struct fc_frame_header *fc_hdr;
17483 struct lpfc_hba *phba = vport->phba;
17484 struct lpfc_iocbq *iocbq = NULL;
17485 union lpfc_wqe *wqe;
17486 struct lpfc_dmabuf *pcmd = NULL;
17487 uint32_t frame_len;
17490 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17491 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17493 /* Send the received frame back */
17494 iocbq = lpfc_sli_get_iocbq(phba);
17498 /* Allocate buffer for command payload */
17499 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17501 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17503 if (!pcmd || !pcmd->virt)
17506 INIT_LIST_HEAD(&pcmd->list);
17508 /* copyin the payload */
17509 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17511 /* fill in BDE's for command */
17512 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17513 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17514 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17515 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17517 iocbq->context2 = pcmd;
17518 iocbq->vport = vport;
17519 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17520 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17523 * Setup rest of the iocb as though it were a WQE
17524 * Build the SEND_FRAME WQE
17526 wqe = (union lpfc_wqe *)&iocbq->iocb;
17528 wqe->send_frame.frame_len = frame_len;
17529 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17530 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17531 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17532 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17533 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17534 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17536 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17537 iocbq->iocb.ulpLe = 1;
17538 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17539 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17540 if (rc == IOCB_ERROR)
17543 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17547 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17548 "2023 Unable to process MDS loopback frame\n");
17549 if (pcmd && pcmd->virt)
17550 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17553 lpfc_sli_release_iocbq(phba, iocbq);
17554 lpfc_in_buf_free(phba, &dmabuf->dbuf);
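/*
 * Illustrative sketch (not compiled): the six fc_hdr_wd0..wd5 assignments
 * above byte-swap the 24-byte FC header word by word for the SEND_FRAME
 * WQE. The same copy expressed as a loop; the helper is hypothetical and
 * assumes a destination array of six host-order words:
 */
#if 0
static void copy_fc_hdr_words(uint32_t *dst, const void *fc_hdr)
{
	const __be32 *src = fc_hdr;
	int i;

	for (i = 0; i < 6; i++)	/* 6 words x 4 bytes = 24-byte FC header */
		dst[i] = be32_to_cpu(src[i]);
}
#endif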
17558 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17559 * @phba: Pointer to HBA context object.
17561 * This function is called with no lock held. This function processes all
17562 * the received buffers and hands them to the upper layer when a received buffer
17563 * indicates that it is the final frame in the sequence. The interrupt
17564 * service routine processes received buffers at interrupt contexts.
17565 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17566 * appropriate receive function when the final frame in a sequence is received.
17569 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17570 struct hbq_dmabuf *dmabuf)
17572 struct hbq_dmabuf *seq_dmabuf;
17573 struct fc_frame_header *fc_hdr;
17574 struct lpfc_vport *vport;
17578 /* Process each received buffer */
17579 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17581 /* check to see if this is a valid type of frame */
17582 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17583 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17587 if ((bf_get(lpfc_cqe_code,
17588 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17589 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17590 &dmabuf->cq_event.cqe.rcqe_cmpl);
17592 fcfi = bf_get(lpfc_rcqe_fcf_id,
17593 &dmabuf->cq_event.cqe.rcqe_cmpl);
17595 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17596 vport = phba->pport;
17597 /* Handle MDS Loopback frames */
17598 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17602 /* d_id this frame is directed to */
17603 did = sli4_did_from_fc_hdr(fc_hdr);
17605 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17607 /* throw out the frame */
17608 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17612 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17613 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17614 (did != Fabric_DID)) {
17616 * Throw out the frame if we are not pt2pt.
17617 * The pt2pt protocol allows for discovery frames
17618 * to be received without a registered VPI.
17620 if (!(vport->fc_flag & FC_PT2PT) ||
17621 (phba->link_state == LPFC_HBA_READY)) {
17622 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17627 /* Handle the basic abort sequence (BA_ABTS) event */
17628 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17629 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17633 /* Link this frame */
17634 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17636 /* unable to add frame to vport - throw it out */
17637 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17640 /* If not last frame in sequence continue processing frames. */
17641 if (!lpfc_seq_complete(seq_dmabuf))
17644 /* Send the complete sequence to the upper layer protocol */
17645 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17649 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17650 * @phba: pointer to lpfc hba data structure.
17652 * This routine is invoked to post rpi header templates to the
17653 * HBA consistent with the SLI-4 interface spec. This routine
17654 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17655 * SLI4_PAGE_SIZE / 64 rpi context headers.
17657 * This routine does not require any locks. Its usage is expected
17658 * to be driver load or reset recovery when the driver is
17663 * -EIO - The mailbox failed to complete successfully.
17664 * When this error occurs, the driver is not guaranteed
17665 * to have any rpi regions posted to the device and
17666 * must either attempt to repost the regions or take a
17670 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17672 struct lpfc_rpi_hdr *rpi_page;
17676 /* SLI4 ports that support extents do not require RPI headers. */
17677 if (!phba->sli4_hba.rpi_hdrs_in_use)
17679 if (phba->sli4_hba.extents_in_use)
17682 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
17684 * Assign the rpi headers a physical rpi only if the driver
17685 * has not initialized those resources. A port reset only
17686 * needs the headers posted.
17688 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17690 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17692 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17693 if (rc != MBX_SUCCESS) {
17694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17695 "2008 Error %d posting all rpi "
17703 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17704 LPFC_RPI_RSRC_RDY);
17709 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17710 * @phba: pointer to lpfc hba data structure.
17711 * @rpi_page: pointer to the rpi memory region.
17713 * This routine is invoked to post a single rpi header to the
17714 * HBA consistent with the SLI-4 interface spec. This memory region
17715 * maps up to 64 rpi context regions.
17719 * -ENOMEM - No available memory
17720 * -EIO - The mailbox failed to complete successfully.
17723 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17725 LPFC_MBOXQ_t *mboxq;
17726 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17728 uint32_t shdr_status, shdr_add_status;
17729 union lpfc_sli4_cfg_shdr *shdr;
17731 /* SLI4 ports that support extents do not require RPI headers. */
17732 if (!phba->sli4_hba.rpi_hdrs_in_use)
17734 if (phba->sli4_hba.extents_in_use)
17737 /* The port is notified of the header region via a mailbox command. */
17738 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17740 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17741 "2001 Unable to allocate memory for issuing "
17742 "SLI_CONFIG_SPECIAL mailbox command\n");
17746 /* Post all rpi memory regions to the port. */
17747 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
17748 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17749 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17750 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
17751 sizeof(struct lpfc_sli4_cfg_mhdr),
17752 LPFC_SLI4_MBX_EMBED);
17755 /* Post the physical rpi to the port for this rpi header. */
17756 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17757 rpi_page->start_rpi);
17758 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17759 hdr_tmpl, rpi_page->page_count);
17761 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17762 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
17763 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17764 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17765 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17766 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17767 if (rc != MBX_TIMEOUT)
17768 mempool_free(mboxq, phba->mbox_mem_pool);
17769 if (shdr_status || shdr_add_status || rc) {
17770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17771 "2514 POST_RPI_HDR mailbox failed with "
17772 "status x%x add_status x%x, mbx status x%x\n",
17773 shdr_status, shdr_add_status, rc);
17777 * The next_rpi stores the next logical modulo-64 rpi value used
17778 * to post physical rpis in subsequent rpi postings.
17780 spin_lock_irq(&phba->hbalock);
17781 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17782 spin_unlock_irq(&phba->hbalock);
17788 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17789 * @phba: pointer to lpfc hba data structure.
17791 * This routine is invoked to allocate an rpi from the range of rpis
17792 * managed by the driver. If the supply of unused rpis runs low, it
17793 * posts another rpi header page to the port so that allocation can
17794 * continue.
17797 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17798 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
17801 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17804 uint16_t max_rpi, rpi_limit;
17805 uint16_t rpi_remaining, lrpi = 0;
17806 struct lpfc_rpi_hdr *rpi_hdr;
17807 unsigned long iflag;
17810 * Fetch the next logical rpi. Because this index is logical,
17811 * the driver starts at 0 each time.
17813 spin_lock_irqsave(&phba->hbalock, iflag);
17814 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17815 rpi_limit = phba->sli4_hba.next_rpi;
17817 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17818 if (rpi >= rpi_limit)
17819 rpi = LPFC_RPI_ALLOC_ERROR;
17821 set_bit(rpi, phba->sli4_hba.rpi_bmask);
17822 phba->sli4_hba.max_cfg_param.rpi_used++;
17823 phba->sli4_hba.rpi_count++;
17825 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17826 "0001 rpi:%x max:%x lim:%x\n",
17827 (int) rpi, max_rpi, rpi_limit);
17830 * Don't try to allocate more rpi header regions if the device limit
17831 * has been exhausted.
17833 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17834 (phba->sli4_hba.rpi_count >= max_rpi)) {
17835 spin_unlock_irqrestore(&phba->hbalock, iflag);
17840 * RPI header postings are not required for SLI4 ports capable of
17843 if (!phba->sli4_hba.rpi_hdrs_in_use) {
17844 spin_unlock_irqrestore(&phba->hbalock, iflag);
17849 * If the driver is running low on rpi resources, allocate another
17850 * page now. Note that the next_rpi value is used because
17851 * it represents how many rpis are actually in use, whereas max_rpi notes
17852 * the maximum number supported by the device.
17854 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17855 spin_unlock_irqrestore(&phba->hbalock, iflag);
17856 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17857 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17860 "2002 Error Could not grow rpi "
17863 lrpi = rpi_hdr->start_rpi;
17864 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17865 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
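/*
 * Illustrative sketch (not compiled): the rpi allocator above is a bitmap
 * allocator - find the first clear bit below the current limit, mark it
 * used, and grow by posting another header page once the low-water mark
 * is crossed. The core allocate/free pair in isolation, locking omitted;
 * the helper names are hypothetical:
 */
#if 0
static int rpi_bitmap_alloc(unsigned long *bmask, uint16_t limit)
{
	int rpi = find_next_zero_bit(bmask, limit, 0);

	if (rpi >= limit)
		return LPFC_RPI_ALLOC_ERROR;
	set_bit(rpi, bmask);
	return rpi;
}

static void rpi_bitmap_free(unsigned long *bmask, int rpi)
{
	if (rpi != LPFC_RPI_ALLOC_ERROR)
		clear_bit(rpi, bmask);
}
#endif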
17873 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
17874 * @phba: pointer to lpfc hba data structure.
17876 * This routine is invoked to release an rpi to the pool of
17877 * available rpis maintained by the driver. The caller must hold
17878 * the hbalock; lpfc_sli4_free_rpi() below is the locking wrapper.
17880 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17883 * if the rpi value indicates a prior unreg has already
17884 * been done, skip the unreg.
17886 if (rpi == LPFC_RPI_ALLOC_ERROR)
17889 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17890 phba->sli4_hba.rpi_count--;
17891 phba->sli4_hba.max_cfg_param.rpi_used--;
17896 * lpfc_sli4_free_rpi - Release an rpi for reuse.
17897 * @phba: pointer to lpfc hba data structure.
17899 * This routine is invoked to release an rpi to the pool of
17900 * available rpis maintained by the driver.
17903 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17905 spin_lock_irq(&phba->hbalock);
17906 __lpfc_sli4_free_rpi(phba, rpi);
17907 spin_unlock_irq(&phba->hbalock);
17911 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17912 * @phba: pointer to lpfc hba data structure.
17914 * This routine is invoked to remove the memory regions that
17915 * provided rpis via a bitmask.
17918 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17920 kfree(phba->sli4_hba.rpi_bmask);
17921 kfree(phba->sli4_hba.rpi_ids);
17922 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17926 * lpfc_sli4_resume_rpi - Resume an rpi with the port
17927 * @ndlp: pointer to the node whose rpi is to be resumed.
17929 * This routine issues a RESUME_RPI mailbox command for the rpi
17930 * associated with @ndlp, invoking @cmpl (if provided) on completion.
17933 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17934 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17936 LPFC_MBOXQ_t *mboxq;
17937 struct lpfc_hba *phba = ndlp->phba;
17940 /* The port is notified of the rpi resume via a mailbox command. */
17941 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17945 /* Construct the RESUME_RPI mailbox command. */
17946 lpfc_resume_rpi(mboxq, ndlp);
17948 mboxq->mbox_cmpl = cmpl;
17949 mboxq->context1 = arg;
17950 mboxq->context2 = ndlp;
17952 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17953 mboxq->vport = ndlp->vport;
17954 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17955 if (rc == MBX_NOT_FINISHED) {
17956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17957 "2010 Resume RPI Mailbox failed "
17958 "status %d, mbxStatus x%x\n", rc,
17959 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17960 mempool_free(mboxq, phba->mbox_mem_pool);
17967 * lpfc_sli4_init_vpi - Initialize a vpi with the port
17968 * @vport: Pointer to the vport for which the vpi is being initialized
17970 * This routine is invoked to activate a vpi with the port.
17974 * -Evalue otherwise
17977 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
17979 LPFC_MBOXQ_t *mboxq;
17981 int retval = MBX_SUCCESS;
17983 struct lpfc_hba *phba = vport->phba;
17984 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17987 lpfc_init_vpi(phba, mboxq, vport->vpi);
17988 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
17989 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
17990 if (rc != MBX_SUCCESS) {
17991 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
17992 "2022 INIT VPI Mailbox failed "
17993 "status %d, mbxStatus x%x\n", rc,
17994 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17997 if (rc != MBX_TIMEOUT)
17998 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18004 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18005 * @phba: pointer to lpfc hba data structure.
18006 * @mboxq: Pointer to mailbox object.
18008 * This routine is the completion handler for the ADD_FCF_RECORD
18009 * nonembedded mailbox command. It checks the mailbox subheader status
18010 * and releases the mailbox resources.
18013 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18016 union lpfc_sli4_cfg_shdr *shdr;
18017 uint32_t shdr_status, shdr_add_status;
18019 virt_addr = mboxq->sge_array->addr[0];
18020 /* The IOCTL status is embedded in the mailbox subheader. */
18021 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18022 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18023 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18025 if ((shdr_status || shdr_add_status) &&
18026 (shdr_status != STATUS_FCF_IN_USE))
18027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18028 "2558 ADD_FCF_RECORD mailbox failed with "
18029 "status x%x add_status x%x\n",
18030 shdr_status, shdr_add_status);
18032 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18036 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18037 * @phba: pointer to lpfc hba data structure.
18038 * @fcf_record: pointer to the initialized fcf record to add.
18040 * This routine is invoked to manually add a single FCF record. The caller
18041 * must pass a completely initialized FCF_Record. This routine takes
18042 * care of the nonembedded mailbox operations.
18045 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18048 LPFC_MBOXQ_t *mboxq;
18051 struct lpfc_mbx_sge sge;
18052 uint32_t alloc_len, req_len;
18055 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18058 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18062 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18065 /* Allocate DMA memory and set up the non-embedded mailbox command */
18066 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18067 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18068 req_len, LPFC_SLI4_MBX_NEMBED);
18069 if (alloc_len < req_len) {
18070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18071 "2523 Allocated DMA memory size (x%x) is "
18072 "less than the requested DMA memory "
18073 "size (x%x)\n", alloc_len, req_len);
18074 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18079 * Get the first SGE entry from the non-embedded DMA memory. This
18080 * routine only uses a single SGE.
18082 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18083 virt_addr = mboxq->sge_array->addr[0];
18085 * Configure the FCF record for FCFI 0. This is the driver's
18086 * hardcoded default and gets used in nonFIP mode.
18088 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18089 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18090 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18093 * Copy the fcf_index and the FCF Record Data. The data starts after
18094 * the FCoE header plus word10. The data copy needs to be endian
18097 bytep += sizeof(uint32_t);
18098 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18099 mboxq->vport = phba->pport;
18100 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18101 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18102 if (rc == MBX_NOT_FINISHED) {
18103 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18104 "2515 ADD_FCF_RECORD mailbox failed with "
18105 "status 0x%x\n", rc);
18106 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18115 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18116 * @phba: pointer to lpfc hba data structure.
18117 * @fcf_record: pointer to the fcf record to write the default data.
18118 * @fcf_index: FCF table entry index.
18120 * This routine is invoked to build the driver's default FCF record. The
18121 * values used are hardcoded. This routine handles memory initialization.
18125 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18126 struct fcf_record *fcf_record,
18127 uint16_t fcf_index)
18129 memset(fcf_record, 0, sizeof(struct fcf_record));
18130 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18131 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18132 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18133 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18134 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18135 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18136 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18137 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18138 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18139 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18140 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18141 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18142 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18143 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18144 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18145 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18146 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18147 /* Set the VLAN bit map */
18148 if (phba->valid_vlan) {
18149 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18150 = 1 << (phba->vlan_id % 8);
18155 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18156 * @phba: pointer to lpfc hba data structure.
18157 * @fcf_index: FCF table entry offset.
18159 * This routine is invoked to scan the entire FCF table by reading FCF
18160 * record and processing it one at a time starting from the @fcf_index
18161 * for initial FCF discovery or fast FCF failover rediscovery.
18163 * Return 0 if the mailbox command is submitted successfully, non-zero
18167 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18170 LPFC_MBOXQ_t *mboxq;
18172 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18173 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18174 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18176 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18177 "2000 Failed to allocate mbox for "
18180 goto fail_fcf_scan;
18182 /* Construct the read FCF record mailbox command */
18183 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18186 goto fail_fcf_scan;
18188 /* Issue the mailbox command asynchronously */
18189 mboxq->vport = phba->pport;
18190 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18192 spin_lock_irq(&phba->hbalock);
18193 phba->hba_flag |= FCF_TS_INPROG;
18194 spin_unlock_irq(&phba->hbalock);
18196 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18197 if (rc == MBX_NOT_FINISHED)
18200 /* Reset eligible FCF count for new scan */
18201 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18202 phba->fcf.eligible_fcf_cnt = 0;
18208 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18209 /* FCF scan failed, clear FCF_TS_INPROG flag */
18210 spin_lock_irq(&phba->hbalock);
18211 phba->hba_flag &= ~FCF_TS_INPROG;
18212 spin_unlock_irq(&phba->hbalock);
18218 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18219 * @phba: pointer to lpfc hba data structure.
18220 * @fcf_index: FCF table entry offset.
18222 * This routine is invoked to read an FCF record indicated by @fcf_index
18223 * and to use it for FLOGI roundrobin FCF failover.
18225 * Return 0 if the mailbox command is submitted successfully, non-zero
18229 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18232 LPFC_MBOXQ_t *mboxq;
18234 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18236 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18237 "2763 Failed to allocate mbox for "
18240 goto fail_fcf_read;
18242 /* Construct the read FCF record mailbox command */
18243 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18246 goto fail_fcf_read;
18248 /* Issue the mailbox command asynchronously */
18249 mboxq->vport = phba->pport;
18250 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18251 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18252 if (rc == MBX_NOT_FINISHED)
18258 if (error && mboxq)
18259 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18264 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18265 * @phba: pointer to lpfc hba data structure.
18266 * @fcf_index: FCF table entry offset.
18268 * This routine is invoked to read an FCF record indicated by @fcf_index to
18269 * determine whether it's eligible for FLOGI roundrobin failover list.
18271 * Return 0 if the mailbox command is submitted successfully, non-zero
18275 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18278 LPFC_MBOXQ_t *mboxq;
18280 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18282 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18283 "2758 Failed to allocate mbox for "
18286 goto fail_fcf_read;
18288 /* Construct the read FCF record mailbox command */
18289 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18292 goto fail_fcf_read;
18294 /* Issue the mailbox command asynchronously */
18295 mboxq->vport = phba->pport;
18296 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18297 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18298 if (rc == MBX_NOT_FINISHED)
18304 if (error && mboxq)
18305 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18310 * lpfc_check_next_fcf_pri_level
18311 * @phba: pointer to the lpfc_hba struct for this port.
18312 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18313 * routine when the rr_bmask is empty. The FCF indices are put into the
18314 * rr_bmask based on their priority level. Starting from the highest priority
18315 * to the lowest. The most likely FCF candidate will be in the highest
18316 * priority group. When this routine is called, it searches the fcf_pri list
18317 * for the next lowest priority group and repopulates the rr_bmask with those
18320 * 1=success 0=failure
18323 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18325 uint16_t next_fcf_pri;
18326 uint16_t last_index;
18327 struct lpfc_fcf_pri *fcf_pri;
18331 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18332 LPFC_SLI4_FCF_TBL_INDX_MAX);
18333 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18334 "3060 Last IDX %d\n", last_index);
18336 /* Verify the priority list has 2 or more entries */
18337 spin_lock_irq(&phba->hbalock);
18338 if (list_empty(&phba->fcf.fcf_pri_list) ||
18339 list_is_singular(&phba->fcf.fcf_pri_list)) {
18340 spin_unlock_irq(&phba->hbalock);
18341 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18342 "3061 Last IDX %d\n", last_index);
18343 return 0; /* Empty rr list */
18345 spin_unlock_irq(&phba->hbalock);
18349 * Clear the rr_bmask and set all of the bits that are at this
18352 memset(phba->fcf.fcf_rr_bmask, 0,
18353 sizeof(*phba->fcf.fcf_rr_bmask));
18354 spin_lock_irq(&phba->hbalock);
18355 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18356 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18359 * the first priority that has not failed FLOGI
18360 * will be the highest.
18363 next_fcf_pri = fcf_pri->fcf_rec.priority;
18364 spin_unlock_irq(&phba->hbalock);
18365 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18366 rc = lpfc_sli4_fcf_rr_index_set(phba,
18367 fcf_pri->fcf_rec.fcf_index);
18371 spin_lock_irq(&phba->hbalock);
18374 * if next_fcf_pri was not set above and the list is not empty then
18375 * we have failed FLOGI on all of them. So reset the FLOGI failed flag
18376 * and start at the beginning.
18378 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18379 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18380 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18382 * the first priority that has not failed FLOGI
18383 * will be the highest.
18386 next_fcf_pri = fcf_pri->fcf_rec.priority;
18387 spin_unlock_irq(&phba->hbalock);
18388 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18389 rc = lpfc_sli4_fcf_rr_index_set(phba,
18390 fcf_pri->fcf_rec.fcf_index);
18394 spin_lock_irq(&phba->hbalock);
18398 spin_unlock_irq(&phba->hbalock);
18403 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18404 * @phba: pointer to lpfc hba data structure.
18406 * This routine is to get the next eligible FCF record index in a round
18407 * robin fashion. If the next eligible FCF record index equals to the
18408 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18409 * shall be returned, otherwise, the next eligible FCF record's index
18410 * shall be returned.
18413 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18415 uint16_t next_fcf_index;
18418 /* Search start from next bit of currently registered FCF index */
18419 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18422 /* Determine the next fcf index to check */
18423 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18424 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18425 LPFC_SLI4_FCF_TBL_INDX_MAX,
18428 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18429 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18431 * If we have wrapped then we need to clear the bits that
18432 * have been tested so that we can detect when we should
18433 * change the priority level.
18435 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18436 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18440 /* Check roundrobin failover list empty condition */
18441 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18442 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18444 * If next fcf index is not found, check if there are lower
18445 * priority level fcf's in the fcf_priority list.
18446 * Set up the rr_bmask with all of the available fcf bits
18447 * at that level and continue the selection process.
18449 if (lpfc_check_next_fcf_pri_level(phba))
18450 goto initial_priority;
18451 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18452 "2844 No roundrobin failover FCF available\n");
18454 return LPFC_FCOE_FCF_NEXT_NONE;
18457 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18458 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18459 LPFC_FCF_FLOGI_FAILED) {
18460 if (list_is_singular(&phba->fcf.fcf_pri_list))
18461 return LPFC_FCOE_FCF_NEXT_NONE;
18463 goto next_priority;
18466 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18467 "2845 Get next roundrobin failover FCF (x%x)\n",
18470 return next_fcf_index;
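/*
 * Illustrative sketch (not compiled): the roundrobin walk above starts one
 * past the current index, scans the bitmap to its end, then wraps to bit
 * zero; arriving back at the starting index means every eligible FCF has
 * been tried. The wrap logic in isolation; the helper name is
 * hypothetical:
 */
#if 0
static uint16_t rr_next_bit(const unsigned long *bmask, uint16_t nbits,
			    uint16_t cur)
{
	uint16_t next = find_next_bit(bmask, nbits, (cur + 1) % nbits);

	if (next >= nbits)	/* ran off the end, wrap to bit zero */
		next = find_next_bit(bmask, nbits, 0);
	return next;		/* == nbits when the bitmap is empty */
}
#endif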
18474 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18475 * @phba: pointer to lpfc hba data structure.
18477 * This routine sets the FCF record index in to the eligible bmask for
18478 * roundrobin failover search. It checks to make sure that the index
18479 * does not go beyond the range of the driver allocated bmask dimension
18480 * before setting the bit.
18482 * Returns 0 if the index bit successfully set, otherwise, it returns
18486 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18488 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18489 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18490 "2610 FCF (x%x) reached driver's book "
18491 "keeping dimension:x%x\n",
18492 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18495 /* Set the eligible FCF record index bmask */
18496 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18498 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18499 "2790 Set FCF (x%x) to roundrobin FCF failover "
18500 "bmask\n", fcf_index);
18506 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18507 * @phba: pointer to lpfc hba data structure.
18509 * This routine clears the FCF record index from the eligible bmask for
18510 * roundrobin failover search. It checks to make sure that the index
18511 * does not go beyond the range of the driver allocated bmask dimension
18512 * before clearing the bit.
18515 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18517 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18518 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18519 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18520 "2762 FCF (x%x) reached driver's book "
18521 "keeping dimension:x%x\n",
18522 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18525 /* Clear the eligible FCF record index bmask */
18526 spin_lock_irq(&phba->hbalock);
18527 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18529 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18530 list_del_init(&fcf_pri->list);
18534 spin_unlock_irq(&phba->hbalock);
18535 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18537 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18538 "2791 Clear FCF (x%x) from roundrobin failover "
18539 "bmask\n", fcf_index);
18543 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18544 * @phba: pointer to lpfc hba data structure.
18546 * This routine is the completion routine for the rediscover FCF table mailbox
18547 * command. If the mailbox command returned failure, it will try to stop the
18548 * FCF rediscover wait timer.
18551 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18553 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18554 uint32_t shdr_status, shdr_add_status;
18556 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18558 shdr_status = bf_get(lpfc_mbox_hdr_status,
18559 &redisc_fcf->header.cfg_shdr.response);
18560 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18561 &redisc_fcf->header.cfg_shdr.response);
18562 if (shdr_status || shdr_add_status) {
18563 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18564 "2746 Requesting for FCF rediscovery failed "
18565 "status x%x add_status x%x\n",
18566 shdr_status, shdr_add_status);
18567 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18568 spin_lock_irq(&phba->hbalock);
18569 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18570 spin_unlock_irq(&phba->hbalock);
18572 * CVL event triggered FCF rediscover request failed,
18573 * last resort to re-try current registered FCF entry.
18575 lpfc_retry_pport_discovery(phba);
18577 spin_lock_irq(&phba->hbalock);
18578 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18579 spin_unlock_irq(&phba->hbalock);
18581 * DEAD FCF event triggered FCF rediscover request
18582 * failed, last resort to fail over as a link down
18583 * to FCF registration.
18585 lpfc_sli4_fcf_dead_failthrough(phba);
18588 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18589 "2775 Start FCF rediscover quiescent timer\n");
18591 * Start the FCF rediscovery wait timer for the pending FCF
18592 * before rescanning the FCF record table.
18594 lpfc_fcf_redisc_wait_start_timer(phba);
18597 mempool_free(mbox, phba->mbox_mem_pool);
18601 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18602 * @phba: pointer to lpfc hba data structure.
18604 * This routine is invoked to request rediscovery of the entire FCF table
18608 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18610 LPFC_MBOXQ_t *mbox;
18611 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18614 /* Cancel retry delay timers to all vports before FCF rediscover */
18615 lpfc_cancel_all_vport_retry_delay_timer(phba);
18617 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18619 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18620 "2745 Failed to allocate mbox for "
18621 "requesting FCF rediscover.\n");
18625 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18626 sizeof(struct lpfc_sli4_cfg_mhdr));
18627 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18628 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18629 length, LPFC_SLI4_MBX_EMBED);
18631 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18632 /* Set count to 0 for invalidating the entire FCF database */
18633 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18635 /* Issue the mailbox command asynchronously */
18636 mbox->vport = phba->pport;
18637 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18638 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18640 if (rc == MBX_NOT_FINISHED) {
18641 mempool_free(mbox, phba->mbox_mem_pool);
18648 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18649 * @phba: pointer to lpfc hba data structure.
18651 * This function is the failover routine as a last resort to the FCF DEAD
18652 * event when the driver fails to perform fast FCF failover.
18655 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18657 uint32_t link_state;
18660 * Last resort as FCF DEAD event failover will treat this as
18661 * a link down, but save the link state because we don't want
18662 * it to be changed to Link Down unless it is already down.
18664 link_state = phba->link_state;
18665 lpfc_linkdown(phba);
18666 phba->link_state = link_state;
18668 /* Unregister FCF if no devices connected to it */
18669 lpfc_unregister_unused_fcf(phba);
18673 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18674 * @phba: pointer to lpfc hba data structure.
18675 * @rgn23_data: pointer to configure region 23 data.
18677 * This function gets SLI3 port configuration region 23 data through the memory
18678 * dump mailbox command. When it successfully retrieves the data, the size of
18679 * the data is returned; otherwise, 0 is returned.
18682 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18684 LPFC_MBOXQ_t *pmb = NULL;
18686 uint32_t offset = 0;
18692 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18695 "2600 failed to allocate mailbox memory\n");
18701 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18702 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18704 if (rc != MBX_SUCCESS) {
18705 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
18706 "2601 failed to read config "
18707 "region 23, rc 0x%x Status 0x%x\n",
18708 rc, mb->mbxStatus);
18709 mb->un.varDmp.word_cnt = 0;
18712 * dump mem may return a zero when finished or we got a
18713 * mailbox error, either way we are done.
18715 if (mb->un.varDmp.word_cnt == 0)
18717 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18718 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18720 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18721 rgn23_data + offset,
18722 mb->un.varDmp.word_cnt);
18723 offset += mb->un.varDmp.word_cnt;
18724 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18726 mempool_free(pmb, phba->mbox_mem_pool);
18731 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18732 * @phba: pointer to lpfc hba data structure.
18733 * @rgn23_data: pointer to configure region 23 data.
18735 * This function gets SLI4 port configuration region 23 data through the memory
18736 * dump mailbox command. When it successfully retrieves the data, the size of
18737 * the data is returned; otherwise, 0 is returned.
18740 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18742 LPFC_MBOXQ_t *mboxq = NULL;
18743 struct lpfc_dmabuf *mp = NULL;
18744 struct lpfc_mqe *mqe;
18745 uint32_t data_length = 0;
18751 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18754 "3105 failed to allocate mailbox memory\n");
18758 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18760 mqe = &mboxq->u.mqe;
18761 mp = (struct lpfc_dmabuf *) mboxq->context1;
18762 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18765 data_length = mqe->un.mb_words[5];
18766 if (data_length == 0)
18768 if (data_length > DMP_RGN23_SIZE) {
18772 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18774 mempool_free(mboxq, phba->mbox_mem_pool);
18776 lpfc_mbuf_free(phba, mp->virt, mp->phys);
18779 return data_length;
18783 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18784 * @phba: pointer to lpfc hba data structure.
18786 * This function reads region 23 and parses the TLVs for the port status to
18787 * decide whether the user disabled the port. If a TLV indicates the
18788 * port is disabled, the hba_flag is set accordingly.
18791 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18793 uint8_t *rgn23_data = NULL;
18794 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18795 uint32_t offset = 0;
18797 /* Get adapter Region 23 data */
18798 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18802 if (phba->sli_rev < LPFC_SLI_REV4)
18803 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18805 if_type = bf_get(lpfc_sli_intf_if_type,
18806 &phba->sli4_hba.sli_intf);
18807 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18809 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18815 /* Check the region signature first */
18816 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18818 "2619 Config region 23 has bad signature\n");
18823 /* Check the data structure version */
18824 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18825 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18826 "2620 Config region 23 has bad version\n");
18831 /* Parse TLV entries in the region */
18832 while (offset < data_size) {
18833 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18836 * If the TLV is not a driver-specific TLV or the driver id is
18837 * not the Linux driver id, skip the record.
18839 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18840 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18841 (rgn23_data[offset + 3] != 0)) {
18842 offset += rgn23_data[offset + 1] * 4 + 4;
18846 /* Driver found a driver specific TLV in the config region */
18847 sub_tlv_len = rgn23_data[offset + 1] * 4;
18852 * Search for configured port state sub-TLV.
18854 while ((offset < data_size) &&
18855 (tlv_offset < sub_tlv_len)) {
18856 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18861 if (rgn23_data[offset] != PORT_STE_TYPE) {
18862 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18863 offset += rgn23_data[offset + 1] * 4 + 4;
18867 /* This HBA contains PORT_STE configured */
18868 if (!rgn23_data[offset + 2])
18869 phba->hba_flag |= LINK_DISABLED;
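/*
 * Illustrative sketch (not compiled): region 23 records are TLVs whose
 * length byte counts 4-byte words, so each record occupies
 * rgn23_data[off + 1] * 4 payload bytes plus a 4-byte header, as in the
 * offset arithmetic above. A minimal walk over such records; the helper
 * name is hypothetical:
 */
#if 0
static void rgn23_walk(const uint8_t *data, uint32_t size)
{
	uint32_t off = 0;

	while (off < size && data[off] != LPFC_REGION23_LAST_REC) {
		/* data[off] is the type byte, data[off + 1] the word count */
		off += data[off + 1] * 4 + 4;
	}
}
#endif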
18881 * lpfc_wr_object - write an object to the firmware
18882 * @phba: HBA structure that indicates port to create a queue on.
18883 * @dmabuf_list: list of dmabufs to write to the port.
18884 * @size: the total byte value of the objects to write to the port.
18885 * @offset: the current offset to be used to start the transfer.
18887 * This routine will create a wr_object mailbox command to send to the port.
18888 * The mailbox command will be constructed using the dma buffers described in
18889 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18890 * BDEs as the embedded mailbox can support. The @offset variable will be
18891 * used to indicate the starting offset of the transfer and will also return
18892 * the offset after the write object mailbox has completed. @size is used to
18893 * determine the end of the object and whether the eof bit should be set.
18895 * Return 0 if successful; @offset will contain the new offset to use
18896 * for the next write.
18897 * Return negative value for error cases.
18900 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
18901 uint32_t size, uint32_t *offset)
18903 struct lpfc_mbx_wr_object *wr_object;
18904 LPFC_MBOXQ_t *mbox;
18905 int rc = 0, i = 0;
18906 uint32_t shdr_status, shdr_add_status;
18907 uint32_t mbox_tmo;
18908 union lpfc_sli4_cfg_shdr *shdr;
18909 struct lpfc_dmabuf *dmabuf;
18910 uint32_t written = 0;
18912 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18913 if (!mbox)
18914 return -ENOMEM;
18916 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
18917 LPFC_MBOX_OPCODE_WRITE_OBJECT,
18918 sizeof(struct lpfc_mbx_wr_object) -
18919 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18921 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
18922 wr_object->u.request.write_offset = *offset;
18923 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
18924 wr_object->u.request.object_name[0] =
18925 cpu_to_le32(wr_object->u.request.object_name[0]);
18926 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
18927 list_for_each_entry(dmabuf, dmabuf_list, list) {
18928 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
18929 break;
18930 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
18931 wr_object->u.request.bde[i].addrHigh =
18932 putPaddrHigh(dmabuf->phys);
18933 if (written + SLI4_PAGE_SIZE >= size) {
18934 wr_object->u.request.bde[i].tus.f.bdeSize =
18935 (size - written);
18936 written += (size - written);
18937 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
18938 } else {
18939 wr_object->u.request.bde[i].tus.f.bdeSize =
18940 SLI4_PAGE_SIZE;
18941 written += SLI4_PAGE_SIZE;
18942 }
18943 i++;
18944 }
18945 wr_object->u.request.bde_count = i;
18946 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
18947 if (!phba->sli4_hba.intr_enable)
18948 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18949 else {
18950 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18951 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18952 }
18953 /* The IOCTL status is embedded in the mailbox subheader. */
18954 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18955 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18956 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18957 if (rc != MBX_TIMEOUT)
18958 mempool_free(mbox, phba->mbox_mem_pool);
18959 if (shdr_status || shdr_add_status || rc) {
18960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18961 "3025 Write Object mailbox failed with "
18962 "status x%x add_status x%x, mbx status x%x\n",
18963 shdr_status, shdr_add_status, rc);
18964 rc = -ENXIO;
18965 *offset = shdr_add_status;
18966 } else
18967 *offset += wr_object->u.response.actual_write_length;
18968 return rc;
18969 }
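/*
 * A self-contained user-space sketch of the BDE sizing logic above: split
 * an object into page-sized pieces, cap the final piece at the remainder,
 * and flag it end-of-file. The constants are hypothetical stand-ins for
 * SLI4_PAGE_SIZE and the embedded-mailbox BDE limit.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u
#define DEMO_MAX_BDE 8u

static uint32_t demo_plan_bdes(uint32_t size)
{
	uint32_t written = 0, i;

	for (i = 0; i < DEMO_MAX_BDE && written < size; i++) {
		uint32_t bde_size;
		int eof = 0;

		if (written + DEMO_PAGE_SIZE >= size) {
			bde_size = size - written; /* short final BDE */
			eof = 1; /* last piece of the object */
		} else {
			bde_size = DEMO_PAGE_SIZE;
		}
		written += bde_size;
		printf("bde[%u]: %u bytes%s\n", i, bde_size,
		       eof ? " (eof)" : "");
	}
	/* a larger object would be resumed from this offset */
	return written;
}

int main(void)
{
	demo_plan_bdes(10000); /* prints 4096 + 4096 + 1808 (eof) */
	return 0;
}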
18972 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
18973 * @vport: pointer to vport data structure.
18975 * This function iterates through the mailbox queue and cleans up all
18976 * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It
18977 * is called when the driver wants to restart discovery of the vport due to
18978 * a Clear Virtual Link event.
18979 **/
18980 void
18981 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18982 {
18983 struct lpfc_hba *phba = vport->phba;
18984 LPFC_MBOXQ_t *mb, *nextmb;
18985 struct lpfc_dmabuf *mp;
18986 struct lpfc_nodelist *ndlp;
18987 struct lpfc_nodelist *act_mbx_ndlp = NULL;
18988 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
18989 LIST_HEAD(mbox_cmd_list);
18990 uint8_t restart_loop;
18992 /* Clean up internally queued mailbox commands with the vport */
18993 spin_lock_irq(&phba->hbalock);
18994 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18995 if (mb->vport != vport)
18996 continue;
18998 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18999 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19000 continue;
19002 list_del(&mb->list);
19003 list_add_tail(&mb->list, &mbox_cmd_list);
19004 }
19005 /* Clean up active mailbox command with the vport */
19006 mb = phba->sli.mbox_active;
19007 if (mb && (mb->vport == vport)) {
19008 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19009 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19010 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19011 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19012 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
19013 /* Put reference count for delayed processing */
19014 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19015 /* Unregister the RPI when mailbox complete */
19016 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19017 }
19018 }
19019 /* Cleanup any mailbox completions which are not yet processed */
19020 do {
19021 restart_loop = 0;
19022 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19023 /*
19024 * If this mailbox is already processed or it is
19025 * for another vport ignore it.
19026 */
19027 if ((mb->vport != vport) ||
19028 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19029 continue;
19031 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19032 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19033 continue;
19035 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19036 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19037 ndlp = (struct lpfc_nodelist *)mb->context2;
19038 /* Unregister the RPI when mailbox complete */
19039 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19040 restart_loop = 1;
19041 spin_unlock_irq(&phba->hbalock);
19042 spin_lock(shost->host_lock);
19043 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19044 spin_unlock(shost->host_lock);
19045 spin_lock_irq(&phba->hbalock);
19046 break;
19047 }
19048 }
19049 } while (restart_loop);
19051 spin_unlock_irq(&phba->hbalock);
19053 /* Release the cleaned-up mailbox commands */
19054 while (!list_empty(&mbox_cmd_list)) {
19055 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19056 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19057 mp = (struct lpfc_dmabuf *)(mb->context1);
19058 if (mp) {
19059 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19060 kfree(mp);
19061 }
19062 ndlp = (struct lpfc_nodelist *)mb->context2;
19063 mb->context2 = NULL;
19064 if (ndlp) {
19065 spin_lock(shost->host_lock);
19066 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19067 spin_unlock(shost->host_lock);
19068 lpfc_nlp_put(ndlp);
19069 }
19070 }
19071 mempool_free(mb, phba->mbox_mem_pool);
19072 }
19074 /* Release the ndlp with the cleaned-up active mailbox command */
19075 if (act_mbx_ndlp) {
19076 spin_lock(shost->host_lock);
19077 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19078 spin_unlock(shost->host_lock);
19079 lpfc_nlp_put(act_mbx_ndlp);
19080 }
19081 }
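/*
 * A user-space sketch (all names hypothetical) of the locking pattern the
 * routine above relies on: unlink matching entries onto a private list
 * while the lock is held, then release them only after the lock has been
 * dropped, so nothing is freed under the lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cmd {
	int vport_id;
	struct demo_cmd *next;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_cmd *demo_queue;

static void demo_cleanup_vport(int vport_id)
{
	struct demo_cmd *victims = NULL, **pp, *cmd;

	pthread_mutex_lock(&demo_lock);
	for (pp = &demo_queue; (cmd = *pp) != NULL;) {
		if (cmd->vport_id == vport_id) {
			*pp = cmd->next; /* unlink under the lock */
			cmd->next = victims;
			victims = cmd;
		} else {
			pp = &cmd->next;
		}
	}
	pthread_mutex_unlock(&demo_lock);

	while (victims) { /* free with the lock dropped */
		cmd = victims;
		victims = victims->next;
		printf("freeing command for vport %d\n", cmd->vport_id);
		free(cmd);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct demo_cmd *cmd = malloc(sizeof(*cmd));

		cmd->vport_id = i % 2;
		cmd->next = demo_queue;
		demo_queue = cmd;
	}
	demo_cleanup_vport(1); /* frees the two vport-1 entries */
	return 0;
}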
19084 * lpfc_drain_txq - Drain the txq
19085 * @phba: Pointer to HBA context object.
19087 * This function attempts to submit IOCBs on the txq
19088 * to the adapter. For SLI4 adapters, the txq contains
19089 * ELS IOCBs that have been deferred because there
19090 * are no SGLs available. This congestion can occur with large
19091 * vport counts during node discovery.
19092 **/
19093 uint32_t
19095 lpfc_drain_txq(struct lpfc_hba *phba)
19096 {
19097 LIST_HEAD(completions);
19098 struct lpfc_sli_ring *pring;
19099 struct lpfc_iocbq *piocbq = NULL;
19100 unsigned long iflags = 0;
19101 char *fail_msg = NULL;
19102 struct lpfc_sglq *sglq;
19103 union lpfc_wqe128 wqe;
19104 uint32_t txq_cnt = 0;
19105 struct lpfc_queue *wq;
19107 if (phba->link_flag & LS_MDS_LOOPBACK) {
19108 /* MDS WQEs are posted only to the first WQ */
19109 wq = phba->sli4_hba.fcp_wq[0];
19110 if (unlikely(!wq))
19111 return 0;
19112 pring = wq->pring;
19113 } else {
19114 wq = phba->sli4_hba.els_wq;
19115 if (unlikely(!wq))
19116 return 0;
19117 pring = lpfc_phba_elsring(phba);
19118 }
19120 if (unlikely(!pring) || list_empty(&pring->txq))
19121 return 0;
19123 spin_lock_irqsave(&pring->ring_lock, iflags);
19124 list_for_each_entry(piocbq, &pring->txq, list) {
19125 txq_cnt++;
19126 }
19128 if (txq_cnt > pring->txq_max)
19129 pring->txq_max = txq_cnt;
19131 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19133 while (!list_empty(&pring->txq)) {
19134 spin_lock_irqsave(&pring->ring_lock, iflags);
19136 piocbq = lpfc_sli_ringtx_get(phba, pring);
19137 if (!piocbq) {
19138 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19139 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19140 "2823 txq empty and txq_cnt is %d\n",
19141 txq_cnt);
19142 break;
19143 }
19144 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19145 if (!sglq) {
19146 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19147 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19148 break;
19149 }
19150 txq_cnt--;
19152 /* The xri and iocb resources secured,
19153 * attempt to issue request
19154 */
19155 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19156 piocbq->sli4_xritag = sglq->sli4_xritag;
19157 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19158 fail_msg = "to convert bpl to sgl";
19159 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19160 fail_msg = "to convert iocb to wqe";
19161 else if (lpfc_sli4_wq_put(wq, &wqe))
19162 fail_msg = " - Wq is full";
19163 else
19164 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19166 if (fail_msg) {
19167 /* Failed means we can't issue and need to cancel */
19168 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19169 "2822 IOCB failed %s iotag 0x%x "
19170 "xri 0x%x\n",
19171 fail_msg,
19172 piocbq->iotag, piocbq->sli4_xritag);
19173 list_add_tail(&piocbq->list, &completions);
19174 }
19176 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19177 }
19179 /* Cancel all the IOCBs that cannot be issued */
19180 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19181 IOERR_SLI_ABORTED);
19183 return txq_cnt;
19184 }
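/*
 * A user-space sketch of the drain loop above, with locking elided and all
 * names hypothetical: pop one deferred entry at a time, try to claim a
 * finite resource (the SGL in the driver), and if none is available leave
 * the remaining entries queued -- a later completion retries the drain.
 */
#include <stdio.h>

#define DEMO_QUEUE_DEPTH 5

static int demo_txq[DEMO_QUEUE_DEPTH] = { 11, 12, 13, 14, 15 };
static int demo_txq_cnt = DEMO_QUEUE_DEPTH;
static int demo_free_sgls = 3; /* fewer resources than queued entries */

static int demo_drain_txq(void)
{
	while (demo_txq_cnt > 0) {
		int tag = demo_txq[demo_txq_cnt - 1];

		if (demo_free_sgls == 0) {
			/* no resource: leave the entry queued and stop */
			printf("tag %d stays deferred\n", tag);
			break;
		}
		demo_free_sgls--;
		demo_txq_cnt--;
		printf("submitted tag %d\n", tag);
	}
	return demo_txq_cnt; /* entries still waiting */
}

int main(void)
{
	printf("%d entries left\n", demo_drain_txq());
	return 0;
}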
19187 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19188 * @phba: Pointer to HBA context object.
19189 * @pwqeq: Pointer to the command WQE.
19190 * @sglq: Pointer to the scatter gather queue object.
19192 * This routine converts the bpl or bde that is in the WQE
19193 * to a sgl list for the sli4 hardware. The physical address
19194 * of the bpl/bde is converted back to a virtual address.
19195 * If the WQE contains a BPL then the list of BDEs is
19196 * converted to sli4_sge's. If the WQE contains a single
19197 * BDE then it is converted to a single sli4_sge.
19198 * The WQE is still in cpu endianness so the contents of
19199 * the bpl can be used without byte swapping.
19201 * Returns valid XRI = Success, NO_XRI = Failure.
19202 **/
19203 static uint16_t
19204 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19205 struct lpfc_sglq *sglq)
19206 {
19207 uint16_t xritag = NO_XRI;
19208 struct ulp_bde64 *bpl = NULL;
19209 struct ulp_bde64 bde;
19210 struct sli4_sge *sgl = NULL;
19211 struct lpfc_dmabuf *dmabuf;
19212 union lpfc_wqe128 *wqe;
19213 int numBdes = 0;
19214 int i = 0;
19215 uint32_t offset = 0; /* accumulated offset in the sg request list */
19216 int inbound = 0; /* number of sg reply entries inbound from firmware */
19217 uint32_t cmd;
19219 if (!pwqeq || !sglq)
19220 return xritag;
19222 sgl = (struct sli4_sge *)sglq->sgl;
19223 wqe = &pwqeq->wqe;
19224 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19226 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19227 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19228 return sglq->sli4_xritag;
19229 numBdes = pwqeq->rsvd2;
19230 if (numBdes) {
19231 /* The addrHigh and addrLow fields within the WQE
19232 * have not been byteswapped yet so there is no
19233 * need to swap them back.
19234 */
19235 if (pwqeq->context3)
19236 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19237 else
19238 return xritag;
19240 bpl = (struct ulp_bde64 *)dmabuf->virt;
19241 if (!bpl)
19242 return xritag;
19244 for (i = 0; i < numBdes; i++) {
19245 /* Should already be byte swapped. */
19246 sgl->addr_hi = bpl->addrHigh;
19247 sgl->addr_lo = bpl->addrLow;
19249 sgl->word2 = le32_to_cpu(sgl->word2);
19250 if ((i+1) == numBdes)
19251 bf_set(lpfc_sli4_sge_last, sgl, 1);
19252 else
19253 bf_set(lpfc_sli4_sge_last, sgl, 0);
19254 /* swap the size field back to the cpu so we
19255 * can assign it to the sgl.
19256 */
19257 bde.tus.w = le32_to_cpu(bpl->tus.w);
19258 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19259 /* The offsets in the sgl need to be accumulated
19260 * separately for the request and reply lists.
19261 * The request is always first, the reply follows.
19262 */
19263 switch (cmd) {
19264 case CMD_GEN_REQUEST64_WQE:
19265 /* add up the reply sg entries */
19266 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19267 inbound++;
19268 /* first inbound? reset the offset */
19269 if (inbound == 1)
19270 offset = 0;
19271 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19272 bf_set(lpfc_sli4_sge_type, sgl,
19273 LPFC_SGE_TYPE_DATA);
19274 offset += bde.tus.f.bdeSize;
19275 break;
19276 case CMD_FCP_TRSP64_WQE:
19277 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19278 bf_set(lpfc_sli4_sge_type, sgl,
19279 LPFC_SGE_TYPE_DATA);
19280 break;
19281 case CMD_FCP_TSEND64_WQE:
19282 case CMD_FCP_TRECEIVE64_WQE:
19283 bf_set(lpfc_sli4_sge_type, sgl,
19284 bpl->tus.f.bdeFlags);
19285 if (i < 3)
19286 offset = 0;
19287 else
19288 offset += bde.tus.f.bdeSize;
19289 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19290 break;
19291 }
19292 sgl->word2 = cpu_to_le32(sgl->word2);
19293 bpl++;
19294 sgl++;
19295 }
19296 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19297 /* The addrHigh and addrLow fields of the BDE have not
19298 * been byteswapped yet so they need to be swapped
19299 * before putting them in the sgl.
19300 */
19301 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19302 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19303 sgl->word2 = le32_to_cpu(sgl->word2);
19304 bf_set(lpfc_sli4_sge_last, sgl, 1);
19305 sgl->word2 = cpu_to_le32(sgl->word2);
19306 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19307 }
19308 return sglq->sli4_xritag;
19309 }
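/*
 * A self-contained sketch of the byte-swap discipline used above: a word
 * that lives in little-endian layout is converted to CPU order, its flag
 * bits are edited, and it is converted back before being handed to the
 * hardware. The helpers are hypothetical stand-ins for the kernel's
 * le32_to_cpu()/cpu_to_le32(), and the flag position is assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SGE_LAST (1u << 31) /* assumed "last SGE" flag position */

static uint32_t demo_le32_to_cpu(uint32_t le)
{
	const uint8_t *b = (const uint8_t *)&le;

	/* reassemble from explicit byte positions: host-order independent */
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static uint32_t demo_cpu_to_le32(uint32_t v)
{
	uint32_t le;
	uint8_t *b = (uint8_t *)&le;

	b[0] = v & 0xff;
	b[1] = (v >> 8) & 0xff;
	b[2] = (v >> 16) & 0xff;
	b[3] = (v >> 24) & 0xff;
	return le;
}

int main(void)
{
	uint32_t word2 = demo_cpu_to_le32(0x1234); /* wire order */

	word2 = demo_le32_to_cpu(word2);  /* to CPU order */
	word2 |= DEMO_SGE_LAST;           /* edit flags in CPU order */
	word2 = demo_cpu_to_le32(word2);  /* back to wire order */
	printf("word2 logical value: 0x%08x\n", demo_le32_to_cpu(word2));
	return 0;
}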
19312 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19313 * @phba: Pointer to HBA context object.
19314 * @ring_number: Base sli ring number
19315 * @pwqe: Pointer to command WQE.
19316 **/
19317 int
19318 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
19319 struct lpfc_iocbq *pwqe)
19320 {
19321 union lpfc_wqe128 *wqe = &pwqe->wqe;
19322 struct lpfc_nvmet_rcv_ctx *ctxp;
19323 struct lpfc_queue *wq;
19324 struct lpfc_sglq *sglq;
19325 struct lpfc_sli_ring *pring;
19326 unsigned long iflags;
19327 int ret;
19329 /* NVME_LS and NVME_LS ABTS requests. */
19330 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19331 pring = phba->sli4_hba.nvmels_wq->pring;
19332 spin_lock_irqsave(&pring->ring_lock, iflags);
19333 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19334 if (!sglq) {
19335 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19336 return WQE_BUSY;
19337 }
19338 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19339 pwqe->sli4_xritag = sglq->sli4_xritag;
19340 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19341 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19342 return WQE_ERROR;
19343 }
19344 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19345 pwqe->sli4_xritag);
19346 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19347 if (ret) {
19348 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19349 return ret;
19350 }
19352 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19353 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19354 return 0;
19355 }
19357 /* NVME_FCREQ and NVME_ABTS requests */
19358 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19359 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19360 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
19362 spin_lock_irqsave(&pring->ring_lock, iflags);
19363 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19364 bf_set(wqe_cqid, &wqe->generic.wqe_com,
19365 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
19366 ret = lpfc_sli4_wq_put(wq, wqe);
19367 if (ret) {
19368 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19369 return ret;
19370 }
19371 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19372 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19373 return 0;
19374 }
19376 /* NVMET requests */
19377 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19378 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19379 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
19381 spin_lock_irqsave(&pring->ring_lock, iflags);
19382 ctxp = pwqe->context2;
19383 sglq = ctxp->ctxbuf->sglq;
19384 if (pwqe->sli4_xritag == NO_XRI) {
19385 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19386 pwqe->sli4_xritag = sglq->sli4_xritag;
19387 }
19388 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19389 pwqe->sli4_xritag);
19390 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19391 bf_set(wqe_cqid, &wqe->generic.wqe_com,
19392 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
19393 ret = lpfc_sli4_wq_put(wq, wqe);
19394 if (ret) {
19395 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19396 return ret;
19397 }
19398 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19399 spin_unlock_irqrestore(&pring->ring_lock, iflags);