/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)

static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
        __stringify(FW_MAJOR_VERSION) "."	\
        __stringify(FW_MINOR_VERSION) "."	\
        __stringify(FW_REVISION_VERSION) "."	\
        __stringify(FW_ENGINEERING_VERSION)
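/* FW_FILE_VERSION expands to the dotted firmware version string; e.g., with
 * FW_MAJOR_VERSION 8, FW_MINOR_VERSION 33, FW_REVISION_VERSION 11 and
 * FW_ENGINEERING_VERSION 0 (illustrative values), it becomes "8.33.11.0".
 */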
#define QED_FW_FILE_NAME	\
        "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
        pr_info("%s", version);

        return 0;
}

static void __exit qed_cleanup(void)
{
        pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        DP_NOTICE(cdev,
                                  "Can't request 64-bit consistent allocations\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}
static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);

        iounmap(cdev->regview);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;
        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
                          rev_id);
                rc = -ENODEV;
                goto err2;
        }
        if (!pci_is_pcie(pdev)) {
                DP_NOTICE(cdev, "The bus is not PCI Express\n");
                rc = -EIO;
                goto err2;
        }

        cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                DP_NOTICE(cdev, "Cannot find power management capability\n");

        rc = qed_set_coherency_mask(cdev);
        if (rc)
                goto err2;

        cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
        cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
        cdev->pci_params.irq = pdev->irq;

        cdev->regview = pci_ioremap_bar(pdev, 0);
        if (!cdev->regview) {
                DP_NOTICE(cdev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err2;
        }

        cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
        cdev->db_size = pci_resource_len(cdev->pdev, 2);
        if (!cdev->db_size) {
                if (IS_PF(cdev)) {
                        DP_NOTICE(cdev, "No Doorbell bar available\n");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

        if (!cdev->doorbells) {
                DP_NOTICE(cdev, "Cannot map doorbell space\n");
                return -ENOMEM;
        }

        return 0;

err2:
        pci_release_regions(pdev);
err1:
        pci_disable_device(pdev);
err0:
        return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt *ptt;

        memset(dev_info, 0, sizeof(struct qed_dev_info));

        if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;

        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
            tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;

        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
            tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;

        dev_info->num_hwfns = cdev->num_hwfns;
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
        dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
        dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->mf_mode = cdev->mf_mode;
                dev_info->tx_switching = true;

                if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;

                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
                                      &dev_info->fw_eng);
        }

        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                if (ptt) {
                        qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mfw_rev, NULL);

                        qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mbi_version);

                        qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
                                               &dev_info->flash_size);

                        qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
                }
        } else {
                qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
                                    &dev_info->mfw_rev, NULL);
        }

        dev_info->mtu = hw_info->mtu;

        return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
        kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
        struct qed_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return cdev;

        qed_init_struct(cdev);

        return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
        if (!cdev)
                return -ENODEV;

        DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
        return 0;
}
static struct qed_dev *qed_probe(struct pci_dev *pdev,
                                 struct qed_probe_params *params)
{
        struct qed_dev *cdev;
        int rc;

        cdev = qed_alloc_cdev(pdev);
        if (!cdev)
                goto err0;

        cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
        cdev->protocol = params->protocol;

        if (params->is_vf)
                cdev->b_is_vf = true;

        qed_init_dp(cdev, params->dp_module, params->dp_level);

        rc = qed_init_pci(cdev, pdev);
        if (rc) {
                DP_ERR(cdev, "init pci failed\n");
                goto err1;
        }
        DP_INFO(cdev, "PCI init completed successfully\n");

        rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
        if (rc) {
                DP_ERR(cdev, "hw prepare failed\n");
                goto err2;
        }

        DP_INFO(cdev, "qed_probe completed successfully\n");

        return cdev;

err2:
        qed_free_pci(cdev);
err1:
        qed_free_cdev(cdev);
err0:
        return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
        if (!cdev)
                return;

        qed_hw_remove(cdev);

        qed_free_pci(cdev);

        qed_set_power_state(cdev, PCI_D3hot);

        qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                pci_disable_msix(cdev->pdev);
                kfree(cdev->int_params.msix_table);
        } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
                pci_disable_msi(cdev->pdev);
        }

        memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
                           struct qed_int_params *int_params)
{
        int rc, cnt, i;

        cnt = int_params->in.num_vectors;

        for (i = 0; i < cnt; i++)
                int_params->msix_table[i].entry = i;

        rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
                                   int_params->in.min_msix_cnt, cnt);
        if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
            (rc % cdev->num_hwfns)) {
                pci_disable_msix(cdev->pdev);

                /* If fastpath is initialized, we need at least one interrupt
                 * per hwfn [and the slow path interrupts]. New requested number
                 * should be a multiple of the number of hwfns.
                 */
                cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with less vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
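                /* Illustrative: with 2 hwfns and only 5 of 10 vectors granted,
                 * cnt is rounded down to 4 so both hwfns get an equal share.
                 */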
                rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
                                           cnt);
                if (!rc)
                        rc = cnt;
        }

        /* For VFs, we should return with an error in case we didn't get the
         * exact number of msix vectors as we requested.
         * Not doing that will lead to a crash when starting queues for
         * this VF.
         */
        if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
                /* MSI-x configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
                rc = 0;
        } else {
                DP_NOTICE(cdev,
                          "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
                          cnt, rc);
        }

        return rc;
}
/* This function outputs the int mode and the number of enabled msix vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
        struct qed_int_params *int_params = &cdev->int_params;
        struct msix_entry *tbl;
        int rc = 0, cnt;

        switch (int_params->in.int_mode) {
        case QED_INT_MODE_MSIX:
                /* Allocate MSIX table */
                cnt = int_params->in.num_vectors;
                int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
                if (!int_params->msix_table) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Enable MSIX */
                rc = qed_enable_msix(cdev, int_params);
                if (!rc)
                        goto out;

                DP_NOTICE(cdev, "Failed to enable MSI-X\n");
                kfree(int_params->msix_table);
                if (force_mode)
                        goto out;
                /* Fallthrough */

        case QED_INT_MODE_MSI:
                if (cdev->num_hwfns == 1) {
                        rc = pci_enable_msi(cdev->pdev);
                        if (!rc) {
                                int_params->out.int_mode = QED_INT_MODE_MSI;
                                goto out;
                        }

                        DP_NOTICE(cdev, "Failed to enable MSI\n");
                        if (force_mode)
                                goto out;
                }
                /* Fallthrough */

        case QED_INT_MODE_INTA:
                int_params->out.int_mode = QED_INT_MODE_INTA;
                rc = 0;
                goto out;
        default:
                DP_NOTICE(cdev, "Unknown int_mode value %d\n",
                          int_params->in.int_mode);
                rc = -EINVAL;
        }

out:
        if (!rc)
                DP_INFO(cdev, "Using %s interrupts\n",
                        int_params->out.int_mode == QED_INT_MODE_INTA ?
                        "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
                        "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

        return rc;
}
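/* Fastpath indices (SBs, SIMD handlers, MSI-X entries) are interleaved across
 * hwfns on multi-engine (CMT) devices: index % num_hwfns selects the engine
 * and index / num_hwfns the slot within it. E.g., with 2 hwfns, index 5 maps
 * to hwfn 1, relative index 2.
 */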
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
                                    int index, void (*handler)(void *))
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        hwfn->simd_proto_handler[relative_idx].func = handler;
        hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        memset(&hwfn->simd_proto_handler[relative_idx], 0,
               sizeof(struct qed_simd_fp_handler));
}
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
        tasklet_schedule((struct tasklet_struct *)tasklet);
        return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
        struct qed_dev *cdev = (struct qed_dev *)dev_instance;
        struct qed_hwfn *hwfn;
        irqreturn_t rc = IRQ_NONE;
        u64 status;
        int i, j;

        for (i = 0; i < cdev->num_hwfns; i++) {
                status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

                if (!status)
                        continue;

                hwfn = &cdev->hwfns[i];

                /* Slowpath interrupt */
                if (unlikely(status & 0x1)) {
                        tasklet_schedule(hwfn->sp_dpc);
                        status &= ~0x1;
                        rc = IRQ_HANDLED;
                }

                /* Fastpath interrupts */
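                /* Bit 0 of the SISR status was the slowpath DPC above; bit
                 * j + 1, i.e. (0x2ULL << j), maps to simd_proto_handler[j],
                 * giving up to 63 fastpath handlers per hwfn.
                 */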
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
                                struct qed_simd_fp_handler *p_handler =
                                        &hwfn->simd_proto_handler[j];

                                if (p_handler->func)
                                        p_handler->func(p_handler->token);
                                else
                                        DP_NOTICE(hwfn,
                                                  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
                                                  j, status);

                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
                }

                if (unlikely(status))
                        DP_VERBOSE(hwfn, NETIF_MSG_INTR,
                                   "got an unknown interrupt status 0x%llx\n",
                                   status);
        }

        return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
        struct qed_dev *cdev = hwfn->cdev;
        u32 int_mode;
        int rc = 0;
        u8 id;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
        } else {
                unsigned long flags = 0;

                snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
                         cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
                         PCI_FUNC(cdev->pdev->devfn));

                if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
                        flags |= IRQF_SHARED;

                rc = request_irq(cdev->pdev->irq, qed_single_int,
                                 flags, cdev->name, cdev);
        }

        if (rc)
                DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
        else
                DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
                           "Requested slowpath %s\n",
                           (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

        return rc;
}
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
        /* Calling the disable function will make sure that any
         * currently-running function is completed. The following call to the
         * enable function makes this sequence a flush-like operation.
         */
        if (p_hwfn->b_sp_dpc_enabled) {
                tasklet_disable(p_hwfn->sp_dpc);
                tasklet_enable(p_hwfn->sp_dpc);
        }
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u8 id = p_hwfn->my_id;
        u32 int_mode;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX)
                synchronize_irq(cdev->int_params.msix_table[id].vector);
        else
                synchronize_irq(cdev->pdev->irq);

        qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
        int i;

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                for_each_hwfn(cdev, i) {
                        if (!cdev->hwfns[i].b_int_requested)
                                break;
                        synchronize_irq(cdev->int_params.msix_table[i].vector);
                        free_irq(cdev->int_params.msix_table[i].vector,
                                 cdev->hwfns[i].sp_dpc);
                }
        } else {
                if (QED_LEADING_HWFN(cdev)->b_int_requested)
                        free_irq(cdev->pdev->irq, cdev);
        }
        qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
        int i, rc;

        rc = qed_hw_stop(cdev);

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled) {
                        tasklet_disable(p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
                                   "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, p_hwfn->sp_dpc);
                }
        }

        qed_dbg_pf_exit(cdev);

        return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
        int rc, i;

        /* Determine if interface is going to require LL2 */
        if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
                for (i = 0; i < cdev->num_hwfns; i++) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        p_hwfn->using_ll2 = true;
                }
        }

        rc = qed_resc_alloc(cdev);
        if (rc)
                return rc;

        DP_INFO(cdev, "Allocated qed resources\n");

        qed_resc_setup(cdev);

        return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;
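        /* In INTa/MSI mode all fastpath events funnel through the single-IRQ
         * handler, which can demultiplex at most 63 fastpath status bits per
         * hwfn (see qed_single_int()); hence the 63-per-hwfn cap below.
         */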
        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
                limit = cdev->num_hwfns * 63;
        else if (cdev->int_params.fp_msix_cnt)
                limit = cdev->int_params.fp_msix_cnt;

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(struct qed_int_info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        /* Need to expose only MSI-X information; Single IRQ is handled solely
         * by qed.
         */
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.fp_msix_base;

                info->msix_cnt = cdev->int_params.fp_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];
        }

        return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int num_l2_queues = 0;
        int rc;
        int i;

        if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = int_mode;
        for_each_hwfn(cdev, i) {
                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
                cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
                cdev->int_params.in.num_vectors++; /* slowpath */
        }

        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

        if (is_kdump_kernel()) {
                DP_INFO(cdev,
                        "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
                        cdev->int_params.in.min_msix_cnt);
                cdev->int_params.in.num_vectors =
                        cdev->int_params.in.min_msix_cnt;
        }

        rc = qed_set_int_mode(cdev, false);
        if (rc) {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
                return rc;
        }

        cdev->int_params.fp_msix_base = cdev->num_hwfns;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;

        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
            !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;

        for_each_hwfn(cdev, i)
                num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

        DP_VERBOSE(cdev, QED_MSG_RDMA,
                   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
                   cdev->int_params.fp_msix_cnt, num_l2_queues);
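        /* Illustrative: one hwfn, 17 vectors granted and 8 L2 queues leaves
         * fastpath with MSI-X entries 1-8 and hands entries 9-16 to RDMA.
         */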
        if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
                cdev->int_params.rdma_msix_cnt =
                        (cdev->int_params.fp_msix_cnt - num_l2_queues)
                        / cdev->num_hwfns;
                cdev->int_params.rdma_msix_base =
                        cdev->int_params.fp_msix_base + num_l2_queues;
                cdev->int_params.fp_msix_cnt = num_l2_queues;
        } else {
                cdev->int_params.rdma_msix_cnt = 0;
        }

        DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
                   cdev->int_params.rdma_msix_cnt,
                   cdev->int_params.rdma_msix_base);

        return 0;
}
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
        int rc;

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
                            &cdev->int_params.in.num_vectors);
        if (cdev->num_hwfns > 1) {
                u8 vectors = 0;

                qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
                cdev->int_params.in.num_vectors += vectors;
        }

        /* We want a minimum of one fastpath vector per vf hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

        rc = qed_set_int_mode(cdev, true);
        if (rc)
                return rc;

        cdev->int_params.fp_msix_base = 0;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

        return 0;
}
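/* Inflate a zipped firmware buffer into unzip_buf via the per-hwfn zlib
 * stream. Returns the unzipped length in dwords (total_out / 4), or 0 on
 * any zlib initialization or inflation error.
 */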
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
                   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
        int rc;

        p_hwfn->stream->next_in = input_buf;
        p_hwfn->stream->avail_in = input_len;
        p_hwfn->stream->next_out = unzip_buf;
        p_hwfn->stream->avail_out = max_size;

        rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

        if (rc != Z_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
                           rc);
                return 0;
        }

        rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
        zlib_inflateEnd(p_hwfn->stream);

        if (rc != Z_OK && rc != Z_STREAM_END) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
                           p_hwfn->stream->msg, rc);
                return 0;
        }

        return p_hwfn->stream->total_out / 4;
}
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
        int i;
        void *workspace;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
                if (!p_hwfn->stream)
                        return -ENOMEM;

                workspace = vzalloc(zlib_inflate_workspacesize());
                if (!workspace)
                        return -ENOMEM;
                p_hwfn->stream->workspace = workspace;
        }

        return 0;
}
static void qed_free_stream_mem(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                vfree(p_hwfn->stream->workspace);
                kfree(p_hwfn->stream);
        }
}
static void qed_update_pf_params(struct qed_dev *cdev,
                                 struct qed_pf_params *params)
{
        int i;

        if (IS_ENABLED(CONFIG_QED_RDMA)) {
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
                /* divide by 3 the MRs to avoid MF ILT overflow */
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }

        if (cdev->num_hwfns > 1 || IS_VF(cdev))
                params->eth_pf_params.num_arfs_filters = 0;

        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
         */
        if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;

                num_cons = &params->eth_pf_params.num_cons;
                *num_cons = min_t(u16, *num_cons, 192);
        }

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->pf_params = *params;
        }
}
static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
{
        struct qed_drv_load_params drv_load_params;
        struct qed_hw_init_params hw_init_params;
        struct qed_mcp_drv_version drv_version;
        struct qed_tunnel_info tunn_info;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
        struct qed_ptt *p_ptt;
        int rc = -EINVAL;

        if (qed_iov_wq_start(cdev))
                goto err;

        if (IS_PF(cdev)) {
                rc = reject_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                     &cdev->pdev->dev);
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to find fw file - /lib/firmware/%s\n",
                                  QED_FW_FILE_NAME);
                        goto err;
                }

                if (cdev->num_hwfns == 1) {
                        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                        if (p_ptt) {
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
                                goto err;
                        }
                }
        }

        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;

        if (IS_PF(cdev))
                rc = qed_slowpath_setup_int(cdev, params->int_mode);
        else
                rc = qed_slowpath_vf_setup_int(cdev);
        if (rc)
                goto err1;

        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
                if (rc)
                        goto err2;

                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);

                qed_dbg_pf_init(cdev);
        }

        /* Start the slowpath */
        memset(&hw_init_params, 0, sizeof(hw_init_params));
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.vxlan.b_mode_enabled = true;
        tunn_info.l2_gre.b_mode_enabled = true;
        tunn_info.ip_gre.b_mode_enabled = true;
        tunn_info.l2_geneve.b_mode_enabled = true;
        tunn_info.ip_geneve.b_mode_enabled = true;
        tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        hw_init_params.p_tunn = &tunn_info;
        hw_init_params.b_hw_start = true;
        hw_init_params.int_mode = cdev->int_params.out.int_mode;
        hw_init_params.allow_npar_tx_switch = true;
        hw_init_params.bin_fw_data = data;

        memset(&drv_load_params, 0, sizeof(drv_load_params));
        drv_load_params.is_crash_kernel = is_kdump_kernel();
        drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
        drv_load_params.avoid_eng_reset = false;
        drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
        hw_init_params.p_drv_load_params = &drv_load_params;

        rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;

        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");

        if (IS_PF(cdev)) {
                cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
                                           BIT(QED_MODE_L2GENEVE_TUNN) |
                                           BIT(QED_MODE_IPGENEVE_TUNN) |
                                           BIT(QED_MODE_L2GRE_TUNN) |
                                           BIT(QED_MODE_IPGRE_TUNN));
        }

        /* Allocate LL2 interface if needed */
        if (QED_LEADING_HWFN(cdev)->using_ll2) {
                rc = qed_ll2_alloc_if(cdev);
                if (rc)
                        goto err3;
        }

        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
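                /* Pack major.minor.rev.eng into one dword for the MFW; e.g.,
                 * driver version 8.10.9.20 becomes 0x080a0914.
                 */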
                drv_version.version = (params->drv_major << 24) |
                                      (params->drv_minor << 16) |
                                      (params->drv_rev << 8) |
                                      (params->drv_eng);
                strlcpy(drv_version.name, params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
                        return rc;
                }
        }

        qed_reset_vport_stats(cdev);

        return 0;

err3:
        qed_ll2_dealloc_if(cdev);
err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);
        qed_free_stream_mem(cdev);
        qed_disable_msix(cdev);
err1:
        qed_resc_free(cdev);
err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
            QED_LEADING_HWFN(cdev)->p_arfs_ptt)
                qed_ptt_release(QED_LEADING_HWFN(cdev),
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt);

        qed_iov_wq_stop(cdev, false);

        return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
        if (!cdev)
                return -ENODEV;

        qed_ll2_dealloc_if(cdev);

        if (IS_PF(cdev)) {
                if (cdev->num_hwfns == 1)
                        qed_ptt_release(QED_LEADING_HWFN(cdev),
                                        QED_LEADING_HWFN(cdev)->p_arfs_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
        }

        qed_nic_stop(cdev);

        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);

        qed_disable_msix(cdev);

        qed_resc_free(cdev);

        qed_iov_wq_stop(cdev, true);

        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        return 0;
}
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
        int i;

        memcpy(cdev->name, name, NAME_SIZE);
        for_each_hwfn(cdev, i)
                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
static u32 qed_sb_init(struct qed_dev *cdev,
                       struct qed_sb_info *sb_info,
                       void *sb_virt_addr,
                       dma_addr_t sb_phy_addr, u16 sb_id,
                       enum qed_sb_type type)
{
        struct qed_hwfn *p_hwfn;
        struct qed_ptt *p_ptt;
        int hwfn_index;
        u16 rel_sb_id;
        u8 n_hwfns;
        u32 rc;

        /* RoCE uses single engine and CMT uses two engines. When using both
         * we force only a single engine. Storage uses only engine 0 too.
         */
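        /* Illustrative: with n_hwfns == 2, L2 SB 5 lands on hwfn 1 as
         * relative SB 2, while for RoCE/storage every SB stays on hwfn 0.
         */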
        if (type == QED_SB_TYPE_L2_QUEUE)
                n_hwfns = cdev->num_hwfns;
        else
                n_hwfns = 1;

        hwfn_index = sb_id % n_hwfns;
        p_hwfn = &cdev->hwfns[hwfn_index];
        rel_sb_id = sb_id / n_hwfns;

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   hwfn_index, rel_sb_id, sb_id);

        if (IS_PF(p_hwfn->cdev)) {
                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
                qed_ptt_release(p_hwfn, p_ptt);
        } else {
                rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
        }

        return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
                          struct qed_sb_info *sb_info, u16 sb_id)
{
        struct qed_hwfn *p_hwfn;
        int hwfn_index;
        u16 rel_sb_id;
        u32 rc;

        hwfn_index = sb_id % cdev->num_hwfns;
        p_hwfn = &cdev->hwfns[hwfn_index];
        rel_sb_id = sb_id / cdev->num_hwfns;

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   hwfn_index, rel_sb_id, sb_id);

        rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

        return rc;
}
static bool qed_can_link_change(struct qed_dev *cdev)
{
        return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
        struct qed_hwfn *hwfn;
        struct qed_mcp_link_params *link_params;
        struct qed_ptt *ptt;
        int rc;

        if (!cdev)
                return -ENODEV;

        /* The link should be set only once per PF */
        hwfn = &cdev->hwfns[0];

        /* When VF wants to set link, force it to read the bulletin instead.
         * This mimics the PF behavior, where a notification [both immediate
         * and possible later] would be generated when changing properties.
         */
        if (IS_VF(cdev)) {
                qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
                return 0;
        }

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EBUSY;

        link_params = qed_mcp_get_link_params(hwfn);
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
                link_params->speed.autoneg = params->autoneg;
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
                link_params->speed.advertised_speeds = 0;
                if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
                    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
                if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
                if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
                if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
                if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
                if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
        }
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
                link_params->speed.forced_speed = params->forced_speed;
        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                        link_params->pause.autoneg = true;
                else
                        link_params->pause.autoneg = false;
                if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
                        link_params->pause.forced_rx = true;
                else
                        link_params->pause.forced_rx = false;
                if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
                        link_params->pause.forced_tx = true;
                else
                        link_params->pause.forced_tx = false;
        }
        if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
                switch (params->loopback_mode) {
                case QED_LINK_LOOPBACK_INT_PHY:
                        link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
                        break;
                case QED_LINK_LOOPBACK_EXT_PHY:
                        link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
                        break;
                case QED_LINK_LOOPBACK_EXT:
                        link_params->loopback_mode = ETH_LOOPBACK_EXT;
                        break;
                case QED_LINK_LOOPBACK_MAC:
                        link_params->loopback_mode = ETH_LOOPBACK_MAC;
                        break;
                default:
                        link_params->loopback_mode = ETH_LOOPBACK_NONE;
                        break;
                }
        }

        if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
                memcpy(&link_params->eee, &params->eee,
                       sizeof(link_params->eee));

        rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

        qed_ptt_release(hwfn, ptt);

        return rc;
}
static int qed_get_port_type(u32 media_type)
{
        int port_type;

        switch (media_type) {
        case MEDIA_SFPP_10G_FIBER:
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_XFP_FIBER:
        case MEDIA_MODULE_FIBER:
        case MEDIA_KR:
                port_type = PORT_FIBRE;
                break;
        case MEDIA_DA_TWINAX:
                port_type = PORT_DA;
                break;
        case MEDIA_BASE_T:
                port_type = PORT_TP;
                break;
        case MEDIA_NOT_PRESENT:
                port_type = PORT_NONE;
                break;
        case MEDIA_UNSPECIFIED:
        default:
                port_type = PORT_OTHER;
                break;
        }
        return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
                             struct qed_mcp_link_params *params,
                             struct qed_mcp_link_state *link,
                             struct qed_mcp_link_capabilities *link_caps)
{
        void *p;

        if (!IS_PF(hwfn->cdev)) {
                qed_vf_get_link_params(hwfn, params);
                qed_vf_get_link_state(hwfn, link);
                qed_vf_get_link_caps(hwfn, link_caps);

                return 0;
        }

        p = qed_mcp_get_link_params(hwfn);
        if (!p)
                return -ENXIO;
        memcpy(params, p, sizeof(*params));

        p = qed_mcp_get_link_state(hwfn);
        if (!p)
                return -ENXIO;
        memcpy(link, p, sizeof(*link));

        p = qed_mcp_get_link_capabilities(hwfn);
        if (!p)
                return -ENXIO;
        memcpy(link_caps, p, sizeof(*link_caps));

        return 0;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt,
                          struct qed_link_output *if_link)
{
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
        struct qed_mcp_link_capabilities link_caps;
        u32 media_type;

        memset(if_link, 0, sizeof(*if_link));

        /* Prepare source inputs */
        if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
                dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
                return;
        }

        /* Set the link parameters to pass to protocol driver */
        if (link.link_up)
                if_link->link_up = true;

        /* TODO - at the moment assume supported and advertised speed equal */
        if_link->supported_caps = QED_LM_FIBRE_BIT;
        if (link_caps.default_speed_autoneg)
                if_link->supported_caps |= QED_LM_Autoneg_BIT;
        if (params.pause.autoneg ||
            (params.pause.forced_rx && params.pause.forced_tx))
                if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
        if (params.pause.autoneg || params.pause.forced_rx ||
            params.pause.forced_tx)
                if_link->supported_caps |= QED_LM_Pause_BIT;

        if_link->advertised_caps = if_link->supported_caps;
        if (params.speed.autoneg)
                if_link->advertised_caps |= QED_LM_Autoneg_BIT;
        else
                if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
                if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
                    QED_LM_1000baseT_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
                if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
                if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
                if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
                if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
                    QED_LM_1000baseT_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
                if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
                if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
                if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

        if (link.link_up)
                if_link->speed = link.speed;

        /* TODO - fill duplex properly */
        if_link->duplex = DUPLEX_FULL;
        qed_mcp_get_media_type(hwfn, ptt, &media_type);
        if_link->port = qed_get_port_type(media_type);

        if_link->autoneg = params.speed.autoneg;

        if (params.pause.autoneg)
                if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
        if (params.pause.forced_rx)
                if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
        if (params.pause.forced_tx)
                if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

        /* Link partner capabilities */
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
                if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
                if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
                if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
                if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
                if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
                if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
                if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

        if (link.an_complete)
                if_link->lp_caps |= QED_LM_Autoneg_BIT;

        if (link.partner_adv_pause)
                if_link->lp_caps |= QED_LM_Pause_BIT;
        if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
            link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
                if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

        if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
                if_link->eee_supported = false;
        } else {
                if_link->eee_supported = true;
                if_link->eee_active = link.eee_active;
                if_link->sup_caps = link_caps.eee_speed_caps;
                /* MFW clears adv_caps on eee disable; use configured value */
                if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
                                        params.eee.adv_caps;
                if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
                if_link->eee.enable = params.eee.enable;
                if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
                if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
        }
}
static void qed_get_current_link(struct qed_dev *cdev,
                                 struct qed_link_output *if_link)
{
        struct qed_hwfn *hwfn;
        struct qed_ptt *ptt;
        int i;

        hwfn = &cdev->hwfns[0];
        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(hwfn);
                if (ptt) {
                        qed_fill_link(hwfn, ptt, if_link);
                        qed_ptt_release(hwfn, ptt);
                } else {
                        DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
                }
        } else {
                qed_fill_link(hwfn, NULL, if_link);
        }

        for_each_hwfn(cdev, i)
                qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
        void *cookie = hwfn->cdev->ops_cookie;
        struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
        struct qed_link_output if_link;

        qed_fill_link(hwfn, ptt, &if_link);
        qed_inform_vf_link_state(hwfn);

        if (IS_LEAD_HWFN(hwfn) && cookie)
                op->link_update(cookie, &if_link);
}
static int qed_drain(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn;
        struct qed_ptt *ptt;
        int i, rc;

        if (IS_VF(cdev))
                return 0;

        for_each_hwfn(cdev, i) {
                hwfn = &cdev->hwfns[i];
                ptt = qed_ptt_acquire(hwfn);
                if (!ptt) {
                        DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
                        return -EBUSY;
                }
                rc = qed_mcp_drain(hwfn, ptt);
                qed_ptt_release(hwfn, ptt);
                if (rc)
                        return rc;
        }

        return 0;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
                             u8 *buf, u16 len)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
        int rc;

        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len);
        qed_ptt_release(hwfn, ptt);
        return rc;
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
                            void *handle)
{
        return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_set_led(hwfn, ptt, mode);

        qed_ptt_release(hwfn, ptt);

        return status;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int rc = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
                                   : QED_OV_WOL_DISABLED);
        if (rc)
                goto out;
        rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
                                                QED_OV_DRIVER_STATE_ACTIVE :
                                                QED_OV_DRIVER_STATE_DISABLED);

        qed_ptt_release(hwfn, ptt);

        return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *ptt;
        int status = 0;

        if (IS_VF(cdev))
                return 0;

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EAGAIN;

        status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
        if (status)
                goto out;

        status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
        qed_ptt_release(hwfn, ptt);
        return status;
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
        .selftest_memory = &qed_selftest_memory,
        .selftest_interrupt = &qed_selftest_interrupt,
        .selftest_register = &qed_selftest_register,
        .selftest_clock = &qed_selftest_clock,
        .selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
        .selftest = &qed_selftest_ops_pass,
        .probe = &qed_probe,
        .remove = &qed_remove,
        .set_power_state = &qed_set_power_state,
        .set_name = &qed_set_name,
        .update_pf_params = &qed_update_pf_params,
        .slowpath_start = &qed_slowpath_start,
        .slowpath_stop = &qed_slowpath_stop,
        .set_fp_int = &qed_set_int_fp,
        .get_fp_int = &qed_get_int_fp,
        .sb_init = &qed_sb_init,
        .sb_release = &qed_sb_release,
        .simd_handler_config = &qed_simd_handler_config,
        .simd_handler_clean = &qed_simd_handler_clean,
        .dbg_grc = &qed_dbg_grc,
        .dbg_grc_size = &qed_dbg_grc_size,
        .can_link_change = &qed_can_link_change,
        .set_link = &qed_set_link,
        .get_link = &qed_get_current_link,
        .drain = &qed_drain,
        .update_msglvl = &qed_init_dp,
        .dbg_all_data = &qed_dbg_all_data,
        .dbg_all_data_size = &qed_dbg_all_data_size,
        .chain_alloc = &qed_chain_alloc,
        .chain_free = &qed_chain_free,
        .nvm_get_image = &qed_nvm_get_image,
        .set_coalesce = &qed_set_coalesce,
        .set_led = &qed_set_led,
        .update_drv_state = &qed_update_drv_state,
        .update_mac = &qed_update_mac,
        .update_mtu = &qed_update_mtu,
        .update_wol = &qed_update_wol,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
                            enum qed_mcp_protocol_type type,
                            union qed_mcp_protocol_stats *stats)
{
        struct qed_eth_stats eth_stats;

        memset(stats, 0, sizeof(*stats));

        switch (type) {
        case QED_MCP_LAN_STATS:
                qed_get_vport_stats(cdev, &eth_stats);
                stats->lan_stats.ucast_rx_pkts =
                                        eth_stats.common.rx_ucast_pkts;
                stats->lan_stats.ucast_tx_pkts =
                                        eth_stats.common.tx_ucast_pkts;
                stats->lan_stats.fcs_err = -1;
                break;
        case QED_MCP_FCOE_STATS:
                qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
                break;
        case QED_MCP_ISCSI_STATS:
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
                DP_VERBOSE(cdev, QED_MSG_SP,
                           "Invalid protocol type = %d\n", type);
                return;
        }
}