GNU Linux-libre 4.9.337-gnu1
[releases.git] / drivers / net / ethernet / qlogic / qed / qed_main.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/stddef.h>
10 #include <linux/pci.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/version.h>
14 #include <linux/delay.h>
15 #include <asm/byteorder.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/string.h>
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/workqueue.h>
21 #include <linux/ethtool.h>
22 #include <linux/etherdevice.h>
23 #include <linux/vmalloc.h>
24 #include <linux/qed/qed_if.h>
25 #include <linux/qed/qed_ll2_if.h>
26 #include <linux/crash_dump.h>
27
28 #include "qed.h"
29 #include "qed_sriov.h"
30 #include "qed_sp.h"
31 #include "qed_dev_api.h"
32 #include "qed_ll2.h"
33 #include "qed_mcp.h"
34 #include "qed_hw.h"
35 #include "qed_selftest.h"
36
37 #define QED_ROCE_QPS                    (8192)
38 #define QED_ROCE_DPIS                   (8)
39
40 static char version[] =
41         "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
42
43 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_MODULE_VERSION);
46
47 #define FW_FILE_VERSION                         \
48         __stringify(FW_MAJOR_VERSION) "."       \
49         __stringify(FW_MINOR_VERSION) "."       \
50         __stringify(FW_REVISION_VERSION) "."    \
51         __stringify(FW_ENGINEERING_VERSION)
52
53 #define QED_FW_FILE_NAME        \
54         "/*(DEBLOBBED)*/"
55
56 /*(DEBLOBBED)*/
57
58 static int __init qed_init(void)
59 {
60         pr_info("%s", version);
61
62         return 0;
63 }
64
65 static void __exit qed_cleanup(void)
66 {
67         pr_notice("qed_cleanup called\n");
68 }
69
70 module_init(qed_init);
71 module_exit(qed_cleanup);
72
73 /* Check if the DMA controller on the machine can properly handle the DMA
74  * addressing required by the device.
75  */
76 static int qed_set_coherency_mask(struct qed_dev *cdev)
77 {
78         struct device *dev = &cdev->pdev->dev;
79
80         if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
81                 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
82                         DP_NOTICE(cdev,
83                                   "Can't request 64-bit consistent allocations\n");
84                         return -EIO;
85                 }
86         } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
87                 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
88                 return -EIO;
89         }
90
91         return 0;
92 }
93
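/* Release the PCI resources acquired in qed_init_pci(): unmap the doorbell and
 * register BARs, release the memory regions (only when this is the last enable
 * reference on the device) and disable the PCI device.
 */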
94 static void qed_free_pci(struct qed_dev *cdev)
95 {
96         struct pci_dev *pdev = cdev->pdev;
97
98         if (cdev->doorbells)
99                 iounmap(cdev->doorbells);
100         if (cdev->regview)
101                 iounmap(cdev->regview);
102         if (atomic_read(&pdev->enable_cnt) == 1)
103                 pci_release_regions(pdev);
104
105         pci_disable_device(pdev);
106 }
107
108 #define PCI_REVISION_ID_ERROR_VAL       0xff
109
110 /* Performs PCI initializations as well as initializing PCI-related parameters
111  * in the device structure. Returns 0 in case of success.
112  */
113 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
114 {
115         u8 rev_id;
116         int rc;
117
118         cdev->pdev = pdev;
119
120         rc = pci_enable_device(pdev);
121         if (rc) {
122                 DP_NOTICE(cdev, "Cannot enable PCI device\n");
123                 goto err0;
124         }
125
126         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
127                 DP_NOTICE(cdev, "No memory region found in bar #0\n");
128                 rc = -EIO;
129                 goto err1;
130         }
131
132         if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
133                 DP_NOTICE(cdev, "No memory region found in bar #2\n");
134                 rc = -EIO;
135                 goto err1;
136         }
137
138         if (atomic_read(&pdev->enable_cnt) == 1) {
139                 rc = pci_request_regions(pdev, "qed");
140                 if (rc) {
141                         DP_NOTICE(cdev,
142                                   "Failed to request PCI memory resources\n");
143                         goto err1;
144                 }
145                 pci_set_master(pdev);
146                 pci_save_state(pdev);
147         }
148
149         pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
150         if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
151                 DP_NOTICE(cdev,
152                           "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
153                           rev_id);
154                 rc = -ENODEV;
155                 goto err2;
156         }
157         if (!pci_is_pcie(pdev)) {
158                 DP_NOTICE(cdev, "The bus is not PCI Express\n");
159                 rc = -EIO;
160                 goto err2;
161         }
162
163         cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
164         if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
165                 DP_NOTICE(cdev, "Cannot find power management capability\n");
166
167         rc = qed_set_coherency_mask(cdev);
168         if (rc)
169                 goto err2;
170
171         cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
172         cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
173         cdev->pci_params.irq = pdev->irq;
174
175         cdev->regview = pci_ioremap_bar(pdev, 0);
176         if (!cdev->regview) {
177                 DP_NOTICE(cdev, "Cannot map register space, aborting\n");
178                 rc = -ENOMEM;
179                 goto err2;
180         }
181
182         if (IS_PF(cdev)) {
183                 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
184                 cdev->db_size = pci_resource_len(cdev->pdev, 2);
185                 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
                if (!cdev->doorbells) {
                        DP_NOTICE(cdev, "Cannot map doorbell space\n");
                        iounmap(cdev->regview);
                        rc = -ENOMEM;
                        goto err2;
                }
190         }
191
192         return 0;
193
194 err2:
195         pci_release_regions(pdev);
196 err1:
197         pci_disable_device(pdev);
198 err0:
199         return rc;
200 }
201
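/* Fill @dev_info with this device's capabilities for the protocol driver:
 * number of hwfns, BAR/IRQ parameters, RoCE support, firmware and management
 * FW versions, flash size and the hardware MAC address. For VFs the firmware
 * version is obtained through qed_vf_get_fw_version().
 */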
202 int qed_fill_dev_info(struct qed_dev *cdev,
203                       struct qed_dev_info *dev_info)
204 {
205         struct qed_ptt  *ptt;
206
207         memset(dev_info, 0, sizeof(struct qed_dev_info));
208
209         dev_info->num_hwfns = cdev->num_hwfns;
210         dev_info->pci_mem_start = cdev->pci_params.mem_start;
211         dev_info->pci_mem_end = cdev->pci_params.mem_end;
212         dev_info->pci_irq = cdev->pci_params.irq;
213         dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
214                                     QED_PCI_ETH_ROCE);
215         dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
216         ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
217
218         if (IS_PF(cdev)) {
219                 dev_info->fw_major = FW_MAJOR_VERSION;
220                 dev_info->fw_minor = FW_MINOR_VERSION;
221                 dev_info->fw_rev = FW_REVISION_VERSION;
222                 dev_info->fw_eng = FW_ENGINEERING_VERSION;
223                 dev_info->mf_mode = cdev->mf_mode;
224                 dev_info->tx_switching = true;
225         } else {
226                 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
227                                       &dev_info->fw_minor, &dev_info->fw_rev,
228                                       &dev_info->fw_eng);
229         }
230
231         if (IS_PF(cdev)) {
232                 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
233                 if (ptt) {
234                         qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
235                                             &dev_info->mfw_rev, NULL);
236
237                         qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
238                                                &dev_info->flash_size);
239
240                         qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
241                 }
242         } else {
243                 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
244                                     &dev_info->mfw_rev, NULL);
245         }
246
247         return 0;
248 }
249
250 static void qed_free_cdev(struct qed_dev *cdev)
251 {
252         kfree((void *)cdev);
253 }
254
255 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
256 {
257         struct qed_dev *cdev;
258
259         cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
260         if (!cdev)
261                 return cdev;
262
263         qed_init_struct(cdev);
264
265         return cdev;
266 }
267
268 /* Sets the requested power state */
269 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
270 {
271         if (!cdev)
272                 return -ENODEV;
273
274         DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
275         return 0;
276 }
277
278 /* probing */
279 static struct qed_dev *qed_probe(struct pci_dev *pdev,
280                                  struct qed_probe_params *params)
281 {
282         struct qed_dev *cdev;
283         int rc;
284
285         cdev = qed_alloc_cdev(pdev);
286         if (!cdev)
287                 goto err0;
288
289         cdev->protocol = params->protocol;
290
291         if (params->is_vf)
292                 cdev->b_is_vf = true;
293
294         qed_init_dp(cdev, params->dp_module, params->dp_level);
295
296         rc = qed_init_pci(cdev, pdev);
297         if (rc) {
298                 DP_ERR(cdev, "init pci failed\n");
299                 goto err1;
300         }
301         DP_INFO(cdev, "PCI init completed successfully\n");
302
303         rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
304         if (rc) {
305                 DP_ERR(cdev, "hw prepare failed\n");
306                 goto err2;
307         }
308
309         DP_INFO(cdev, "qed_probe completed successffuly\n");
310
311         return cdev;
312
313 err2:
314         qed_free_pci(cdev);
315 err1:
316         qed_free_cdev(cdev);
317 err0:
318         return NULL;
319 }
320
321 static void qed_remove(struct qed_dev *cdev)
322 {
323         if (!cdev)
324                 return;
325
326         qed_hw_remove(cdev);
327
328         qed_free_pci(cdev);
329
330         qed_set_power_state(cdev, PCI_D3hot);
331
332         qed_free_cdev(cdev);
333 }
334
335 static void qed_disable_msix(struct qed_dev *cdev)
336 {
337         if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
338                 pci_disable_msix(cdev->pdev);
339                 kfree(cdev->int_params.msix_table);
340         } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
341                 pci_disable_msi(cdev->pdev);
342         }
343
344         memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
345 }
346
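/* Try to enable the requested number of MSI-X vectors. If fewer vectors are
 * granted, retry with the largest multiple of the hwfn count that still
 * satisfies the minimum, so each hwfn keeps its slowpath vector plus at least
 * one fastpath vector. E.g. (illustrative) on a two-hwfn CMT device, a request
 * for 16 vectors that yields only 9 is retried exactly with 8.
 */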
347 static int qed_enable_msix(struct qed_dev *cdev,
348                            struct qed_int_params *int_params)
349 {
350         int i, rc, cnt;
351
352         cnt = int_params->in.num_vectors;
353
354         for (i = 0; i < cnt; i++)
355                 int_params->msix_table[i].entry = i;
356
357         rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
358                                    int_params->in.min_msix_cnt, cnt);
359         if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
360             (rc % cdev->num_hwfns)) {
361                 pci_disable_msix(cdev->pdev);
362
363                 /* If fastpath is initialized, we need at least one interrupt
364                  * per hwfn [in addition to the slowpath interrupts]. The new
365                  * requested number should be a multiple of the number of hwfns.
366                  */
367                 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
368                 DP_NOTICE(cdev,
369                           "Trying to enable MSI-X with less vectors (%d out of %d)\n",
370                           cnt, int_params->in.num_vectors);
371                 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
372                                            cnt);
373                 if (!rc)
374                         rc = cnt;
375         }
376
377         /* For VFs, we should return an error if we didn't get the exact
378          * number of MSI-X vectors we requested.
379          * Not doing that will lead to a crash when starting queues for
380          * this VF.
381          */
382         if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
383                 /* MSI-X configuration was achieved */
384                 int_params->out.int_mode = QED_INT_MODE_MSIX;
385                 int_params->out.num_vectors = rc;
386                 rc = 0;
387         } else {
388                 DP_NOTICE(cdev,
389                           "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
390                           cnt, rc);
391         }
392
393         return rc;
394 }
395
396 /* This function outputs the int mode and the number of enabled MSI-X vectors */
397 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
398 {
399         struct qed_int_params *int_params = &cdev->int_params;
400         struct msix_entry *tbl;
401         int rc = 0, cnt;
402
403         switch (int_params->in.int_mode) {
404         case QED_INT_MODE_MSIX:
405                 /* Allocate MSIX table */
406                 cnt = int_params->in.num_vectors;
407                 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
408                 if (!int_params->msix_table) {
409                         rc = -ENOMEM;
410                         goto out;
411                 }
412
413                 /* Enable MSIX */
414                 rc = qed_enable_msix(cdev, int_params);
415                 if (!rc)
416                         goto out;
417
418                 DP_NOTICE(cdev, "Failed to enable MSI-X\n");
419                 kfree(int_params->msix_table);
420                 if (force_mode)
421                         goto out;
422                 /* Fallthrough */
423
424         case QED_INT_MODE_MSI:
425                 if (cdev->num_hwfns == 1) {
426                         rc = pci_enable_msi(cdev->pdev);
427                         if (!rc) {
428                                 int_params->out.int_mode = QED_INT_MODE_MSI;
429                                 goto out;
430                         }
431
432                         DP_NOTICE(cdev, "Failed to enable MSI\n");
433                         if (force_mode)
434                                 goto out;
435                 }
436                 /* Fallthrough */
437
438         case QED_INT_MODE_INTA:
439                 int_params->out.int_mode = QED_INT_MODE_INTA;
440                 rc = 0;
441                 goto out;
442         default:
443                 DP_NOTICE(cdev, "Unknown int_mode value %d\n",
444                           int_params->in.int_mode);
445                 rc = -EINVAL;
446         }
447
448 out:
449         if (!rc)
450                 DP_INFO(cdev, "Using %s interrupts\n",
451                         int_params->out.int_mode == QED_INT_MODE_INTA ?
452                         "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
453                         "MSI" : "MSIX");
454         cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
455
456         return rc;
457 }
458
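/* Register a fastpath handler for an absolute status-block index. Status
 * blocks are spread round-robin across the hwfns, so index N maps to
 * hwfn (N % num_hwfns) and to relative slot (N / num_hwfns) within it.
 */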
459 static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
460                                     int index, void(*handler)(void *))
461 {
462         struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
463         int relative_idx = index / cdev->num_hwfns;
464
465         hwfn->simd_proto_handler[relative_idx].func = handler;
466         hwfn->simd_proto_handler[relative_idx].token = token;
467 }
468
469 static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
470 {
471         struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
472         int relative_idx = index / cdev->num_hwfns;
473
474         memset(&hwfn->simd_proto_handler[relative_idx], 0,
475                sizeof(struct qed_simd_fp_handler));
476 }
477
478 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
479 {
480         tasklet_schedule((struct tasklet_struct *)tasklet);
481         return IRQ_HANDLED;
482 }
483
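/* INTa/MSI interrupt handler. The per-hwfn SISR register is read once per
 * hwfn; bit 0 indicates a slowpath event (handled by scheduling the slowpath
 * tasklet) while the higher bits indicate fastpath events, dispatched to the
 * handlers registered via qed_simd_handler_config().
 */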
484 static irqreturn_t qed_single_int(int irq, void *dev_instance)
485 {
486         struct qed_dev *cdev = (struct qed_dev *)dev_instance;
487         struct qed_hwfn *hwfn;
488         irqreturn_t rc = IRQ_NONE;
489         u64 status;
490         int i, j;
491
492         for (i = 0; i < cdev->num_hwfns; i++) {
493                 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
494
495                 if (!status)
496                         continue;
497
498                 hwfn = &cdev->hwfns[i];
499
500                 /* Slowpath interrupt */
501                 if (unlikely(status & 0x1)) {
502                         tasklet_schedule(hwfn->sp_dpc);
503                         status &= ~0x1;
504                         rc = IRQ_HANDLED;
505                 }
506
507                 /* Fastpath interrupts */
508                 for (j = 0; j < 64; j++) {
509                         if ((0x2ULL << j) & status) {
510                                 struct qed_simd_fp_handler *p_handler =
511                                         &hwfn->simd_proto_handler[j];
512
513                                 if (p_handler->func)
514                                         p_handler->func(p_handler->token);
515                                 else
516                                         DP_NOTICE(hwfn,
517                                                   "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
518                                                   j, status);
519
520                                 status &= ~(0x2ULL << j);
521                                 rc = IRQ_HANDLED;
522                         }
523                 }
524
525                 if (unlikely(status))
526                         DP_VERBOSE(hwfn, NETIF_MSG_INTR,
527                                    "got an unknown interrupt status 0x%llx\n",
528                                    status);
529         }
530
531         return rc;
532 }
533
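/* Request the slowpath interrupt for @hwfn. With MSI-X each hwfn gets its own
 * vector, bound directly to its slowpath tasklet; with MSI/INTa a single IRQ
 * is requested for the whole device and demultiplexed in qed_single_int().
 */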
534 int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
535 {
536         struct qed_dev *cdev = hwfn->cdev;
537         u32 int_mode;
538         int rc = 0;
539         u8 id;
540
541         int_mode = cdev->int_params.out.int_mode;
542         if (int_mode == QED_INT_MODE_MSIX) {
543                 id = hwfn->my_id;
544                 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
545                          id, cdev->pdev->bus->number,
546                          PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
547                 rc = request_irq(cdev->int_params.msix_table[id].vector,
548                                  qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
549         } else {
550                 unsigned long flags = 0;
551
552                 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
553                          cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
554                          PCI_FUNC(cdev->pdev->devfn));
555
556                 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
557                         flags |= IRQF_SHARED;
558
559                 rc = request_irq(cdev->pdev->irq, qed_single_int,
560                                  flags, cdev->name, cdev);
561         }
562
563         if (rc)
564                 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
565         else
566                 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
567                            "Requested slowpath %s\n",
568                            (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
569
570         return rc;
571 }
572
573 static void qed_slowpath_irq_free(struct qed_dev *cdev)
574 {
575         int i;
576
577         if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
578                 for_each_hwfn(cdev, i) {
579                         if (!cdev->hwfns[i].b_int_requested)
580                                 break;
581                         synchronize_irq(cdev->int_params.msix_table[i].vector);
582                         free_irq(cdev->int_params.msix_table[i].vector,
583                                  cdev->hwfns[i].sp_dpc);
584                 }
585         } else {
586                 if (QED_LEADING_HWFN(cdev)->b_int_requested)
587                         free_irq(cdev->pdev->irq, cdev);
588         }
589         qed_int_disable_post_isr_release(cdev);
590 }
591
592 static int qed_nic_stop(struct qed_dev *cdev)
593 {
594         int i, rc;
595
596         rc = qed_hw_stop(cdev);
597
598         for (i = 0; i < cdev->num_hwfns; i++) {
599                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
600
601                 if (p_hwfn->b_sp_dpc_enabled) {
602                         tasklet_disable(p_hwfn->sp_dpc);
603                         p_hwfn->b_sp_dpc_enabled = false;
604                         DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
605                                    "Disabled sp taskelt [hwfn %d] at %p\n",
606                                    i, p_hwfn->sp_dpc);
607                 }
608         }
609
610         qed_dbg_pf_exit(cdev);
611
612         return rc;
613 }
614
615 static int qed_nic_reset(struct qed_dev *cdev)
616 {
617         int rc;
618
619         rc = qed_hw_reset(cdev);
620         if (rc)
621                 return rc;
622
623         qed_resc_free(cdev);
624
625         return 0;
626 }
627
628 static int qed_nic_setup(struct qed_dev *cdev)
629 {
630         int rc, i;
631
632         /* Determine if interface is going to require LL2 */
633         if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
634                 for (i = 0; i < cdev->num_hwfns; i++) {
635                         struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
636
637                         p_hwfn->using_ll2 = true;
638                 }
639         }
640
641         rc = qed_resc_alloc(cdev);
642         if (rc)
643                 return rc;
644
645         DP_INFO(cdev, "Allocated qed resources\n");
646
647         qed_resc_setup(cdev);
648
649         return rc;
650 }
651
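/* Reserve @cnt fastpath interrupt slots for the protocol driver and return how
 * many can actually be served: all the fastpath MSI-X vectors in MSI-X mode,
 * or up to 63 status blocks per hwfn when a single IRQ is shared.
 */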
652 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
653 {
654         int limit = 0;
655
656         /* Mark the fastpath as free/used */
657         cdev->int_params.fp_initialized = cnt ? true : false;
658
659         if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
660                 limit = cdev->num_hwfns * 63;
661         else if (cdev->int_params.fp_msix_cnt)
662                 limit = cdev->int_params.fp_msix_cnt;
663
664         if (!limit)
665                 return -ENOMEM;
666
667         return min_t(int, cnt, limit);
668 }
669
670 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
671 {
672         memset(info, 0, sizeof(struct qed_int_info));
673
674         if (!cdev->int_params.fp_initialized) {
675                 DP_INFO(cdev,
676                         "Protocol driver requested interrupt information, but its support is not yet configured\n");
677                 return -EINVAL;
678         }
679
680         /* Need to expose only MSI-X information; Single IRQ is handled solely
681          * by qed.
682          */
683         if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
684                 int msix_base = cdev->int_params.fp_msix_base;
685
686                 info->msix_cnt = cdev->int_params.fp_msix_cnt;
687                 info->msix = &cdev->int_params.msix_table[msix_base];
688         }
689
690         return 0;
691 }
692
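/* Size and enable the PF interrupt resources. One vector is requested per
 * status block plus one slowpath vector per hwfn, with a floor of two vectors
 * per hwfn. Whatever is granted beyond the slowpath vectors becomes the
 * fastpath pool, which is further split between L2 queues and RDMA when the
 * personality is ETH_ROCE. E.g. (illustrative) a single-hwfn PF granted 17
 * vectors ends up with 1 slowpath and 16 fastpath vectors.
 */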
693 static int qed_slowpath_setup_int(struct qed_dev *cdev,
694                                   enum qed_int_mode int_mode)
695 {
696         struct qed_sb_cnt_info sb_cnt_info;
697         int num_l2_queues = 0;
698         int rc;
699         int i;
700
701         if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
702                 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
703                 return -EINVAL;
704         }
705
706         memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
707         cdev->int_params.in.int_mode = int_mode;
708         for_each_hwfn(cdev, i) {
709                 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
710                 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
711                 cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
712                 cdev->int_params.in.num_vectors++; /* slowpath */
713         }
714
715         /* We want a minimum of one slowpath and one fastpath vector per hwfn */
716         cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
717
718         if (is_kdump_kernel()) {
719                 DP_INFO(cdev,
720                         "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
721                         cdev->int_params.in.min_msix_cnt);
722                 cdev->int_params.in.num_vectors =
723                         cdev->int_params.in.min_msix_cnt;
724         }
725
726         rc = qed_set_int_mode(cdev, false);
727         if (rc)  {
728                 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
729                 return rc;
730         }
731
732         cdev->int_params.fp_msix_base = cdev->num_hwfns;
733         cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
734                                        cdev->num_hwfns;
735
736         if (!IS_ENABLED(CONFIG_QED_RDMA) ||
737             QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
738                 return 0;
739
740         for_each_hwfn(cdev, i)
741                 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
742
743         DP_VERBOSE(cdev, QED_MSG_RDMA,
744                    "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
745                    cdev->int_params.fp_msix_cnt, num_l2_queues);
746
747         if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
748                 cdev->int_params.rdma_msix_cnt =
749                         (cdev->int_params.fp_msix_cnt - num_l2_queues)
750                         / cdev->num_hwfns;
751                 cdev->int_params.rdma_msix_base =
752                         cdev->int_params.fp_msix_base + num_l2_queues;
753                 cdev->int_params.fp_msix_cnt = num_l2_queues;
754         } else {
755                 cdev->int_params.rdma_msix_cnt = 0;
756         }
757
758         DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
759                    cdev->int_params.rdma_msix_cnt,
760                    cdev->int_params.rdma_msix_base);
761
762         return 0;
763 }
764
765 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
766 {
767         int rc;
768
769         memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
770         cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
771
772         qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
773                             &cdev->int_params.in.num_vectors);
774         if (cdev->num_hwfns > 1) {
775                 u8 vectors = 0;
776
777                 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
778                 cdev->int_params.in.num_vectors += vectors;
779         }
780
781         /* We want a minimum of one fastpath vector per vf hwfn */
782         cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
783
784         rc = qed_set_int_mode(cdev, true);
785         if (rc)
786                 return rc;
787
788         cdev->int_params.fp_msix_base = 0;
789         cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
790
791         return 0;
792 }
793
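/* Inflate a zlib-compressed firmware buffer into @unzip_buf using the
 * preallocated per-hwfn stream. Returns the decompressed length in dwords,
 * or 0 on any zlib error.
 */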
794 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
795                    u8 *input_buf, u32 max_size, u8 *unzip_buf)
796 {
797         int rc;
798
799         p_hwfn->stream->next_in = input_buf;
800         p_hwfn->stream->avail_in = input_len;
801         p_hwfn->stream->next_out = unzip_buf;
802         p_hwfn->stream->avail_out = max_size;
803
804         rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
805
806         if (rc != Z_OK) {
807                 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
808                            rc);
809                 return 0;
810         }
811
812         rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
813         zlib_inflateEnd(p_hwfn->stream);
814
815         if (rc != Z_OK && rc != Z_STREAM_END) {
816                 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
817                            p_hwfn->stream->msg, rc);
818                 return 0;
819         }
820
821         return p_hwfn->stream->total_out / 4;
822 }
823
824 static int qed_alloc_stream_mem(struct qed_dev *cdev)
825 {
826         int i;
827         void *workspace;
828
829         for_each_hwfn(cdev, i) {
830                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
831
832                 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
833                 if (!p_hwfn->stream)
834                         return -ENOMEM;
835
836                 workspace = vzalloc(zlib_inflate_workspacesize());
837                 if (!workspace)
838                         return -ENOMEM;
839                 p_hwfn->stream->workspace = workspace;
840         }
841
842         return 0;
843 }
844
845 static void qed_free_stream_mem(struct qed_dev *cdev)
846 {
847         int i;
848
849         for_each_hwfn(cdev, i) {
850                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
851
852                 if (!p_hwfn->stream)
853                         return;
854
855                 vfree(p_hwfn->stream->workspace);
856                 kfree(p_hwfn->stream);
857         }
858 }
859
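/* Propagate the protocol driver's PF parameters to every hwfn, after filling
 * in the RDMA defaults (QPs, DPIs, MRs and protocol index) when RDMA support
 * is compiled in.
 */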
860 static void qed_update_pf_params(struct qed_dev *cdev,
861                                  struct qed_pf_params *params)
862 {
863         int i;
864
865         if (IS_ENABLED(CONFIG_QED_RDMA)) {
866                 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
867                 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
868                 /* Divide the MRs by 3 to avoid MF ILT overflow */
869                 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
870                 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
871         }
872
873         for (i = 0; i < cdev->num_hwfns; i++) {
874                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
875
876                 p_hwfn->pf_params = *params;
877         }
878 }
879
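/* Bring up the slowpath: request the firmware file (PF only), allocate and set
 * up the HW resources, configure interrupts, run qed_hw_init() with the
 * default tunnel classification, attach the LL2 interface when needed and
 * report the driver version to the management FW. Failures unwind in reverse
 * order through the error labels below.
 */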
880 static int qed_slowpath_start(struct qed_dev *cdev,
881                               struct qed_slowpath_params *params)
882 {
883         struct qed_tunn_start_params tunn_info;
884         struct qed_mcp_drv_version drv_version;
885         const u8 *data = NULL;
886         struct qed_hwfn *hwfn;
887         int rc = -EINVAL;
888
889         if (qed_iov_wq_start(cdev))
890                 goto err;
891
892         if (IS_PF(cdev)) {
893                 rc = reject_firmware(&cdev->firmware, QED_FW_FILE_NAME,
894                                       &cdev->pdev->dev);
895                 if (rc) {
896                         DP_NOTICE(cdev,
897                                   "Failed to find fw file - /lib/firmware/%s\n",
898                                   QED_FW_FILE_NAME);
899                         goto err;
900                 }
901         }
902
903         cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
904         rc = qed_nic_setup(cdev);
905         if (rc)
906                 goto err;
907
908         if (IS_PF(cdev))
909                 rc = qed_slowpath_setup_int(cdev, params->int_mode);
910         else
911                 rc = qed_slowpath_vf_setup_int(cdev);
912         if (rc)
913                 goto err1;
914
915         if (IS_PF(cdev)) {
916                 /* Allocate stream for unzipping */
917                 rc = qed_alloc_stream_mem(cdev);
918                 if (rc)
919                         goto err2;
920
921                 /* First Dword used to differentiate between various sources */
922                 data = cdev->firmware->data + sizeof(u32);
923
924                 qed_dbg_pf_init(cdev);
925         }
926
927         memset(&tunn_info, 0, sizeof(tunn_info));
928         tunn_info.tunn_mode |=  1 << QED_MODE_VXLAN_TUNN |
929                                 1 << QED_MODE_L2GRE_TUNN |
930                                 1 << QED_MODE_IPGRE_TUNN |
931                                 1 << QED_MODE_L2GENEVE_TUNN |
932                                 1 << QED_MODE_IPGENEVE_TUNN;
933
934         tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
935         tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
936         tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
937
938         /* Start the slowpath */
939         rc = qed_hw_init(cdev, &tunn_info, true,
940                          cdev->int_params.out.int_mode,
941                          true, data);
942         if (rc)
943                 goto err2;
944
945         DP_INFO(cdev,
946                 "HW initialization and function start completed successfully\n");
947
948         /* Allocate LL2 interface if needed */
949         if (QED_LEADING_HWFN(cdev)->using_ll2) {
950                 rc = qed_ll2_alloc_if(cdev);
951                 if (rc)
952                         goto err3;
953         }
954         if (IS_PF(cdev)) {
955                 hwfn = QED_LEADING_HWFN(cdev);
956                 drv_version.version = (params->drv_major << 24) |
957                                       (params->drv_minor << 16) |
958                                       (params->drv_rev << 8) |
959                                       (params->drv_eng);
960                 strlcpy(drv_version.name, params->name,
961                         MCP_DRV_VER_STR_SIZE - 4);
962                 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
963                                               &drv_version);
964                 if (rc) {
965                         DP_NOTICE(cdev, "Failed sending drv version command\n");
966                         goto err4;
967                 }
968         }
969
970         qed_reset_vport_stats(cdev);
971
972         return 0;
973
974 err4:
975         qed_ll2_dealloc_if(cdev);
976 err3:
977         qed_hw_stop(cdev);
978 err2:
979         qed_hw_timers_stop_all(cdev);
980         if (IS_PF(cdev))
981                 qed_slowpath_irq_free(cdev);
982         qed_free_stream_mem(cdev);
983         qed_disable_msix(cdev);
984 err1:
985         qed_resc_free(cdev);
986 err:
987         if (IS_PF(cdev))
988                 release_firmware(cdev->firmware);
989
990         qed_iov_wq_stop(cdev, false);
991
992         return rc;
993 }
994
995 static int qed_slowpath_stop(struct qed_dev *cdev)
996 {
997         if (!cdev)
998                 return -ENODEV;
999
1000         qed_ll2_dealloc_if(cdev);
1001
1002         if (IS_PF(cdev)) {
1003                 qed_free_stream_mem(cdev);
1004                 if (IS_QED_ETH_IF(cdev))
1005                         qed_sriov_disable(cdev, true);
1006
1007                 qed_nic_stop(cdev);
1008                 qed_slowpath_irq_free(cdev);
1009         }
1010
1011         qed_disable_msix(cdev);
1012         qed_nic_reset(cdev);
1013
1014         qed_iov_wq_stop(cdev, true);
1015
1016         if (IS_PF(cdev))
1017                 release_firmware(cdev->firmware);
1018
1019         return 0;
1020 }
1021
1022 static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
1023                        char ver_str[VER_SIZE])
1024 {
1025         int i;
1026
1027         memcpy(cdev->name, name, NAME_SIZE);
1028         for_each_hwfn(cdev, i)
1029                 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
1030
1031         memcpy(cdev->ver_str, ver_str, VER_SIZE);
1032         cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
1033 }
1034
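/* Initialize a status block. For L2 queues the absolute @sb_id is spread
 * across all hwfns (hwfn = sb_id % num_hwfns, relative id = sb_id / num_hwfns);
 * other protocols always use a single engine. E.g. (illustrative) on a
 * two-hwfn device, L2 sb_id 5 becomes relative SB 2 on hwfn 1.
 */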
1035 static u32 qed_sb_init(struct qed_dev *cdev,
1036                        struct qed_sb_info *sb_info,
1037                        void *sb_virt_addr,
1038                        dma_addr_t sb_phy_addr, u16 sb_id,
1039                        enum qed_sb_type type)
1040 {
1041         struct qed_hwfn *p_hwfn;
1042         int hwfn_index;
1043         u16 rel_sb_id;
1044         u8 n_hwfns;
1045         u32 rc;
1046
1047         /* RoCE uses a single engine while CMT uses two engines. When both are
1048          * in use, force a single engine. Storage also uses only engine 0.
1049          */
1050         if (type == QED_SB_TYPE_L2_QUEUE)
1051                 n_hwfns = cdev->num_hwfns;
1052         else
1053                 n_hwfns = 1;
1054
1055         hwfn_index = sb_id % n_hwfns;
1056         p_hwfn = &cdev->hwfns[hwfn_index];
1057         rel_sb_id = sb_id / n_hwfns;
1058
1059         DP_VERBOSE(cdev, NETIF_MSG_INTR,
1060                    "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1061                    hwfn_index, rel_sb_id, sb_id);
1062
1063         rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
1064                              sb_virt_addr, sb_phy_addr, rel_sb_id);
1065
1066         return rc;
1067 }
1068
1069 static u32 qed_sb_release(struct qed_dev *cdev,
1070                           struct qed_sb_info *sb_info, u16 sb_id)
1071 {
1072         struct qed_hwfn *p_hwfn;
1073         int hwfn_index;
1074         u16 rel_sb_id;
1075         u32 rc;
1076
1077         hwfn_index = sb_id % cdev->num_hwfns;
1078         p_hwfn = &cdev->hwfns[hwfn_index];
1079         rel_sb_id = sb_id / cdev->num_hwfns;
1080
1081         DP_VERBOSE(cdev, NETIF_MSG_INTR,
1082                    "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1083                    hwfn_index, rel_sb_id, sb_id);
1084
1085         rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
1086
1087         return rc;
1088 }
1089
1090 static bool qed_can_link_change(struct qed_dev *cdev)
1091 {
1092         return true;
1093 }
1094
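/* Translate the protocol driver's link request (autoneg, advertised speeds,
 * pause and loopback configuration) into MFW link parameters and ask the MFW
 * to (re)set the link. VFs cannot control the link, so this is a no-op for
 * them.
 */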
1095 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
1096 {
1097         struct qed_hwfn *hwfn;
1098         struct qed_mcp_link_params *link_params;
1099         struct qed_ptt *ptt;
1100         int rc;
1101
1102         if (!cdev)
1103                 return -ENODEV;
1104
1105         if (IS_VF(cdev))
1106                 return 0;
1107
1108         /* The link should be set only once per PF */
1109         hwfn = &cdev->hwfns[0];
1110
1111         ptt = qed_ptt_acquire(hwfn);
1112         if (!ptt)
1113                 return -EBUSY;
1114
1115         link_params = qed_mcp_get_link_params(hwfn);
1116         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
1117                 link_params->speed.autoneg = params->autoneg;
1118         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
1119                 link_params->speed.advertised_speeds = 0;
1120                 if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
1121                     (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
1122                         link_params->speed.advertised_speeds |=
1123                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
1124                 if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
1125                         link_params->speed.advertised_speeds |=
1126                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
1127                 if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
1128                         link_params->speed.advertised_speeds |=
1129                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
1130                 if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
1131                         link_params->speed.advertised_speeds |=
1132                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
1133                 if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
1134                         link_params->speed.advertised_speeds |=
1135                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
1136                 if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
1137                         link_params->speed.advertised_speeds |=
1138                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
1139         }
1140         if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
1141                 link_params->speed.forced_speed = params->forced_speed;
1142         if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
1143                 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
1144                         link_params->pause.autoneg = true;
1145                 else
1146                         link_params->pause.autoneg = false;
1147                 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
1148                         link_params->pause.forced_rx = true;
1149                 else
1150                         link_params->pause.forced_rx = false;
1151                 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
1152                         link_params->pause.forced_tx = true;
1153                 else
1154                         link_params->pause.forced_tx = false;
1155         }
1156         if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
1157                 switch (params->loopback_mode) {
1158                 case QED_LINK_LOOPBACK_INT_PHY:
1159                         link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1160                         break;
1161                 case QED_LINK_LOOPBACK_EXT_PHY:
1162                         link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
1163                         break;
1164                 case QED_LINK_LOOPBACK_EXT:
1165                         link_params->loopback_mode = ETH_LOOPBACK_EXT;
1166                         break;
1167                 case QED_LINK_LOOPBACK_MAC:
1168                         link_params->loopback_mode = ETH_LOOPBACK_MAC;
1169                         break;
1170                 default:
1171                         link_params->loopback_mode = ETH_LOOPBACK_NONE;
1172                         break;
1173                 }
1174         }
1175
1176         rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1177
1178         qed_ptt_release(hwfn, ptt);
1179
1180         return rc;
1181 }
1182
1183 static int qed_get_port_type(u32 media_type)
1184 {
1185         int port_type;
1186
1187         switch (media_type) {
1188         case MEDIA_SFPP_10G_FIBER:
1189         case MEDIA_SFP_1G_FIBER:
1190         case MEDIA_XFP_FIBER:
1191         case MEDIA_MODULE_FIBER:
1192         case MEDIA_KR:
1193                 port_type = PORT_FIBRE;
1194                 break;
1195         case MEDIA_DA_TWINAX:
1196                 port_type = PORT_DA;
1197                 break;
1198         case MEDIA_BASE_T:
1199                 port_type = PORT_TP;
1200                 break;
1201         case MEDIA_NOT_PRESENT:
1202                 port_type = PORT_NONE;
1203                 break;
1204         case MEDIA_UNSPECIFIED:
1205         default:
1206                 port_type = PORT_OTHER;
1207                 break;
1208         }
1209         return port_type;
1210 }
1211
1212 static int qed_get_link_data(struct qed_hwfn *hwfn,
1213                              struct qed_mcp_link_params *params,
1214                              struct qed_mcp_link_state *link,
1215                              struct qed_mcp_link_capabilities *link_caps)
1216 {
1217         void *p;
1218
1219         if (!IS_PF(hwfn->cdev)) {
1220                 qed_vf_get_link_params(hwfn, params);
1221                 qed_vf_get_link_state(hwfn, link);
1222                 qed_vf_get_link_caps(hwfn, link_caps);
1223
1224                 return 0;
1225         }
1226
1227         p = qed_mcp_get_link_params(hwfn);
1228         if (!p)
1229                 return -ENXIO;
1230         memcpy(params, p, sizeof(*params));
1231
1232         p = qed_mcp_get_link_state(hwfn);
1233         if (!p)
1234                 return -ENXIO;
1235         memcpy(link, p, sizeof(*link));
1236
1237         p = qed_mcp_get_link_capabilities(hwfn);
1238         if (!p)
1239                 return -ENXIO;
1240         memcpy(link_caps, p, sizeof(*link_caps));
1241
1242         return 0;
1243 }
1244
1245 static void qed_fill_link(struct qed_hwfn *hwfn,
1246                           struct qed_link_output *if_link)
1247 {
1248         struct qed_mcp_link_params params;
1249         struct qed_mcp_link_state link;
1250         struct qed_mcp_link_capabilities link_caps;
1251         u32 media_type;
1252
1253         memset(if_link, 0, sizeof(*if_link));
1254
1255         /* Prepare source inputs */
1256         if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
1257                 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1258                 return;
1259         }
1260
1261         /* Set the link parameters to pass to protocol driver */
1262         if (link.link_up)
1263                 if_link->link_up = true;
1264
1265         /* TODO - at the moment assume supported and advertised speed equal */
1266         if_link->supported_caps = QED_LM_FIBRE_BIT;
1267         if (link_caps.default_speed_autoneg)
1268                 if_link->supported_caps |= QED_LM_Autoneg_BIT;
1269         if (params.pause.autoneg ||
1270             (params.pause.forced_rx && params.pause.forced_tx))
1271                 if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
1272         if (params.pause.autoneg || params.pause.forced_rx ||
1273             params.pause.forced_tx)
1274                 if_link->supported_caps |= QED_LM_Pause_BIT;
1275
1276         if_link->advertised_caps = if_link->supported_caps;
1277         if (params.speed.autoneg)
1278                 if_link->advertised_caps |= QED_LM_Autoneg_BIT;
1279         else
1280                 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
1281         if (params.speed.advertised_speeds &
1282             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1283                 if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
1284                     QED_LM_1000baseT_Full_BIT;
1285         if (params.speed.advertised_speeds &
1286             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1287                 if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
1288         if (params.speed.advertised_speeds &
1289             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1290                 if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
1291         if (params.speed.advertised_speeds &
1292             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1293                 if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
1294         if (params.speed.advertised_speeds &
1295             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1296                 if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
1297         if (params.speed.advertised_speeds &
1298             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1299                 if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
1300
1301         if (link_caps.speed_capabilities &
1302             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1303                 if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
1304                     QED_LM_1000baseT_Full_BIT;
1305         if (link_caps.speed_capabilities &
1306             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1307                 if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
1308         if (link_caps.speed_capabilities &
1309             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1310                 if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
1311         if (link_caps.speed_capabilities &
1312             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1313                 if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
1314         if (link_caps.speed_capabilities &
1315             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1316                 if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
1317         if (link_caps.speed_capabilities &
1318             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1319                 if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
1320
1321         if (link.link_up)
1322                 if_link->speed = link.speed;
1323
1324         /* TODO - fill duplex properly */
1325         if_link->duplex = DUPLEX_FULL;
1326         qed_mcp_get_media_type(hwfn->cdev, &media_type);
1327         if_link->port = qed_get_port_type(media_type);
1328
1329         if_link->autoneg = params.speed.autoneg;
1330
1331         if (params.pause.autoneg)
1332                 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1333         if (params.pause.forced_rx)
1334                 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1335         if (params.pause.forced_tx)
1336                 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1337
1338         /* Link partner capabilities */
1339         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
1340                 if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
1341         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
1342                 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
1343         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
1344                 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
1345         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
1346                 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
1347         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
1348                 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
1349         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
1350                 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
1351         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
1352                 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
1353
1354         if (link.an_complete)
1355                 if_link->lp_caps |= QED_LM_Autoneg_BIT;
1356
1357         if (link.partner_adv_pause)
1358                 if_link->lp_caps |= QED_LM_Pause_BIT;
1359         if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1360             link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
1361                 if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
1362 }
1363
1364 static void qed_get_current_link(struct qed_dev *cdev,
1365                                  struct qed_link_output *if_link)
1366 {
1367         int i;
1368
1369         qed_fill_link(&cdev->hwfns[0], if_link);
1370
1371         for_each_hwfn(cdev, i)
1372                 qed_inform_vf_link_state(&cdev->hwfns[i]);
1373 }
1374
1375 void qed_link_update(struct qed_hwfn *hwfn)
1376 {
1377         void *cookie = hwfn->cdev->ops_cookie;
1378         struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1379         struct qed_link_output if_link;
1380
1381         qed_fill_link(hwfn, &if_link);
1382         qed_inform_vf_link_state(hwfn);
1383
1384         if (IS_LEAD_HWFN(hwfn) && cookie)
1385                 op->link_update(cookie, &if_link);
1386 }
1387
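/* Issue an MCP drain request on every hwfn so that pending NIG traffic is
 * flushed; VFs return immediately since the drain is a PF-only operation.
 */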
1388 static int qed_drain(struct qed_dev *cdev)
1389 {
1390         struct qed_hwfn *hwfn;
1391         struct qed_ptt *ptt;
1392         int i, rc;
1393
1394         if (IS_VF(cdev))
1395                 return 0;
1396
1397         for_each_hwfn(cdev, i) {
1398                 hwfn = &cdev->hwfns[i];
1399                 ptt = qed_ptt_acquire(hwfn);
1400                 if (!ptt) {
1401                         DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1402                         return -EBUSY;
1403                 }
1404                 rc = qed_mcp_drain(hwfn, ptt);
1405                 qed_ptt_release(hwfn, ptt);
1406                 if (rc)
1407                         return rc;
1408         }
1409
1410         return 0;
1411 }
1412
1413 static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
1414 {
1415         *rx_coal = cdev->rx_coalesce_usecs;
1416         *tx_coal = cdev->tx_coalesce_usecs;
1417 }
1418
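/* Apply the requested Rx/Tx interrupt coalescing values (in usecs) to the
 * queue identified by @qid/@sb_id. The absolute queue id is translated to the
 * owning hwfn and a relative queue index, as in qed_sb_init().
 */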
1419 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
1420                             u8 qid, u16 sb_id)
1421 {
1422         struct qed_hwfn *hwfn;
1423         struct qed_ptt *ptt;
1424         int hwfn_index;
1425         int status = 0;
1426
1427         hwfn_index = qid % cdev->num_hwfns;
1428         hwfn = &cdev->hwfns[hwfn_index];
1429         ptt = qed_ptt_acquire(hwfn);
1430         if (!ptt)
1431                 return -EAGAIN;
1432
1433         status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
1434                                       qid / cdev->num_hwfns, sb_id);
1435         if (status)
1436                 goto out;
1437         status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
1438                                       qid / cdev->num_hwfns, sb_id);
1439 out:
1440         qed_ptt_release(hwfn, ptt);
1441
1442         return status;
1443 }
1444
1445 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
1446 {
1447         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
1448         struct qed_ptt *ptt;
1449         int status = 0;
1450
1451         ptt = qed_ptt_acquire(hwfn);
1452         if (!ptt)
1453                 return -EAGAIN;
1454
1455         status = qed_mcp_set_led(hwfn, ptt, mode);
1456
1457         qed_ptt_release(hwfn, ptt);
1458
1459         return status;
1460 }
1461
1462 static struct qed_selftest_ops qed_selftest_ops_pass = {
1463         .selftest_memory = &qed_selftest_memory,
1464         .selftest_interrupt = &qed_selftest_interrupt,
1465         .selftest_register = &qed_selftest_register,
1466         .selftest_clock = &qed_selftest_clock,
1467 };
1468
1469 const struct qed_common_ops qed_common_ops_pass = {
1470         .selftest = &qed_selftest_ops_pass,
1471         .probe = &qed_probe,
1472         .remove = &qed_remove,
1473         .set_power_state = &qed_set_power_state,
1474         .set_id = &qed_set_id,
1475         .update_pf_params = &qed_update_pf_params,
1476         .slowpath_start = &qed_slowpath_start,
1477         .slowpath_stop = &qed_slowpath_stop,
1478         .set_fp_int = &qed_set_int_fp,
1479         .get_fp_int = &qed_get_int_fp,
1480         .sb_init = &qed_sb_init,
1481         .sb_release = &qed_sb_release,
1482         .simd_handler_config = &qed_simd_handler_config,
1483         .simd_handler_clean = &qed_simd_handler_clean,
1484         .can_link_change = &qed_can_link_change,
1485         .set_link = &qed_set_link,
1486         .get_link = &qed_get_current_link,
1487         .drain = &qed_drain,
1488         .update_msglvl = &qed_init_dp,
1489         .dbg_all_data = &qed_dbg_all_data,
1490         .dbg_all_data_size = &qed_dbg_all_data_size,
1491         .chain_alloc = &qed_chain_alloc,
1492         .chain_free = &qed_chain_free,
1493         .get_coalesce = &qed_get_coalesce,
1494         .set_coalesce = &qed_set_coalesce,
1495         .set_led = &qed_set_led,
1496 };
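
/* Illustrative usage sketch (not part of this file): a protocol driver such as
 * qede reaches the functions above through this ops table, roughly as follows;
 * the exact parameter setup lives in the protocol driver itself.
 *
 *	struct qed_dev *cdev;
 *
 *	cdev = qed_ops->common->probe(pdev, &probe_params);
 *	qed_ops->common->update_pf_params(cdev, &pf_params);
 *	rc = qed_ops->common->slowpath_start(cdev, &slowpath_params);
 *	...
 *	qed_ops->common->slowpath_stop(cdev);
 *	qed_ops->common->remove(cdev);
 */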
1497
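/* Collect protocol statistics for the management FW. Only LAN statistics are
 * supported here: unicast Rx/Tx packet counts are taken from the vport
 * statistics and the FCS error count is reported as unavailable (-1).
 */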
1498 void qed_get_protocol_stats(struct qed_dev *cdev,
1499                             enum qed_mcp_protocol_type type,
1500                             union qed_mcp_protocol_stats *stats)
1501 {
1502         struct qed_eth_stats eth_stats;
1503
1504         memset(stats, 0, sizeof(*stats));
1505
1506         switch (type) {
1507         case QED_MCP_LAN_STATS:
1508                 qed_get_vport_stats(cdev, &eth_stats);
1509                 stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
1510                 stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
1511                 stats->lan_stats.fcs_err = -1;
1512                 break;
1513         default:
1514                 DP_ERR(cdev, "Invalid protocol type = %d\n", type);
1515                 return;
1516         }
1517 }