GNU Linux-libre 4.19.264-gnu1
drivers/infiniband/hw/bnxt_re/main.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
                BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
        struct bnxt *bp;

        bp = netdev_priv(rdev->en_dev->net);
        if (BNXT_VF(bp))
                rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
        u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
        u32 i;
        u32 vf_pct;
        u32 num_vfs;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
                                          dev_attr->max_qp);

        rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
        /* Use max_mr from fw since max_mrw does not get set */
        rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
                                          dev_attr->max_mr);
        rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
                                           dev_attr->max_srq);
        rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
                                         dev_attr->max_cq);

        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                rdev->qplib_ctx.tqm_count[i] =
                        rdev->dev_attr.tqm_alloc_reqs[i];

        if (rdev->num_vfs) {
                /*
                 * Reserve a set of resources for the PF. Divide the remaining
                 * resources among the VFs
                 */
                vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
                num_vfs = 100 * rdev->num_vfs;
                vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
                vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
                vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
                /*
                 * The driver allows many more MRs than other resources. If the
                 * firmware does also, then reserve a fixed amount for the PF
                 * and divide the rest among VFs. VFs may use many MRs for NFS
                 * mounts, ISER, NVME applications, etc. If the firmware
                 * severely restricts the number of MRs, then let PF have
                 * half and divide the rest among VFs, as for the other
                 * resource types.
                 */
                if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
                        vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
                else
                        vf_mrws = (rdev->qplib_ctx.mrw_count -
                                   BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
                vf_gids = BNXT_RE_MAX_GID_PER_VF;
        }
        rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
        rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
        rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
        rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
        rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
}
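
/* Illustrative arithmetic for the VF split above (numbers hypothetical):
 * assuming BNXT_RE_PCT_RSVD_FOR_PF were 35, qpc_count = 65536 and
 * num_vfs = 4, then vf_pct = 65 and
 * vf_qps = (65536 * 65) / (100 * 4) = 10649 QPs per VF,
 * with the remaining 35% of QP contexts held back for the PF.
 */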

/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
        struct bnxt_re_dev *rdev = p;

        if (!rdev)
                return;

        rdev->num_vfs = num_vfs;
        bnxt_re_set_resource_limits(rdev);
        bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
                                      &rdev->qplib_ctx);
}

static void bnxt_re_shutdown(void *p)
{
        struct bnxt_re_dev *rdev = p;

        if (!rdev)
                return;

        bnxt_re_ib_unreg(rdev);
}

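/* bnxt_re_stop_irq()/bnxt_re_start_irq() below are invoked by bnxt_en
 * around an MSI-X reshuffle (for example on a reset or ring
 * reconfiguration). Vector BNXT_RE_AEQ_IDX carries the RCFW/CREQ
 * interrupt and vectors from BNXT_RE_NQ_IDX onward each drive one NQ,
 * hence the "indx - 1" NQ indexing.
 */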
static void bnxt_re_stop_irq(void *handle)
{
        struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
        struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
        struct bnxt_qplib_nq *nq;
        int indx;

        for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
                nq = &rdev->nq[indx - 1];
                bnxt_qplib_nq_stop_irq(nq, false);
        }

        bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
        struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
        struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
        struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
        struct bnxt_qplib_nq *nq;
        int indx, rc;

        if (!ent) {
                /* Not setting the f/w timeout bit in rcfw.
                 * During the driver unload the first command
                 * to f/w will timeout and that will set the
                 * timeout bit.
                 */
                dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
                return;
        }

        /* Vectors may change after restart, so update with new vectors
         * in the device structure.
         */
        for (indx = 0; indx < rdev->num_msix; indx++)
                rdev->msix_entries[indx].vector = ent[indx].vector;

        bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
                                  false);
        for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
                nq = &rdev->nq[indx - 1];
                rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
                                             msix_ent[indx].vector, false);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to reinit NQ index %d\n", indx - 1);
        }
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
        .ulp_async_notifier = NULL,
        .ulp_stop = bnxt_re_stop,
        .ulp_start = bnxt_re_start,
        .ulp_sriov_config = bnxt_re_sriov_config,
        .ulp_shutdown = bnxt_re_shutdown,
        .ulp_irq_stop = bnxt_re_stop_irq,
        .ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
        struct bnxt_en_dev *en_dev;
        int rc;

        if (!rdev)
                return -EINVAL;

        en_dev = rdev->en_dev;

        rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
                                                    BNXT_ROCE_ULP);
        return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
        struct bnxt_en_dev *en_dev;
        int rc = 0;

        if (!rdev)
                return -EINVAL;

        en_dev = rdev->en_dev;

        rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
                                                  &bnxt_re_ulp_ops, rdev);
        return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
        struct bnxt_en_dev *en_dev;
        int rc;

        if (!rdev)
                return -EINVAL;

        en_dev = rdev->en_dev;

        rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

        return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
        int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
        struct bnxt_en_dev *en_dev;

        if (!rdev)
                return -EINVAL;

        en_dev = rdev->en_dev;

        num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

        num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
                                                         rdev->msix_entries,
                                                         num_msix_want);
        if (num_msix_got < BNXT_RE_MIN_MSIX) {
                rc = -EINVAL;
                goto done;
        }
        if (num_msix_got != num_msix_want) {
                dev_warn(rdev_to_dev(rdev),
                         "Requested %d MSI-X vectors, got %d\n",
                         num_msix_want, num_msix_got);
        }
        rdev->num_msix = num_msix_got;
done:
        return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
                                  u16 opcd, u16 crid, u16 trid)
{
        hdr->req_type = cpu_to_le16(opcd);
        hdr->cmpl_ring = cpu_to_le16(crid);
        hdr->target_id = cpu_to_le16(trid);
}
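
/* Callers below pass crid/trid of -1 (0xffff), which in the HWRM protocol
 * means "no completion ring" (the response is polled from the response
 * buffer) and "target the issuing function itself", respectively.
 */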

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
                                int msg_len, void *resp, int resp_max_len,
                                int timeout)
{
        fw_msg->msg = msg;
        fw_msg->msg_len = msg_len;
        fw_msg->resp = resp;
        fw_msg->resp_max_len = resp_max_len;
        fw_msg->timeout = timeout;
}
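
/* Every firmware call in this file follows the same three-step pattern,
 * sketched here for reference (HWRM_<OPCODE> is a placeholder; see the
 * concrete opcodes used below):
 *
 *     bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_<OPCODE>, -1, -1);
 *     ... fill opcode-specific fields of req ...
 *     bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 *                         sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 *     rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
 */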

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
{
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_ring_free_input req = {0};
        struct hwrm_ring_free_output resp;
        struct bnxt_fw_msg fw_msg;
        int rc = -EINVAL;

        if (!en_dev)
                return rc;

        memset(&fw_msg, 0, sizeof(fw_msg));

        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
        req.ring_id = cpu_to_le16(fw_ring_id);
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
        rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW ring %d: rc = %#x", req.ring_id, rc);
        return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
                                  int pages, int type, u32 ring_mask,
                                  u32 map_index, u16 *fw_ring_id)
{
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_ring_alloc_input req = {0};
        struct hwrm_ring_alloc_output resp;
        struct bnxt_fw_msg fw_msg;
        int rc = -EINVAL;

        if (!en_dev)
                return rc;

        memset(&fw_msg, 0, sizeof(fw_msg));
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
        req.enables = 0;
        req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
        if (pages > 1) {
                /* Page size is in log2 units */
                req.page_size = BNXT_PAGE_SHIFT;
                req.page_tbl_depth = 1;
        }
        req.fbo = 0;
        /* Association of ring index with doorbell index and MSIX number */
        req.logical_id = cpu_to_le16(map_index);
        req.length = cpu_to_le32(ring_mask + 1);
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
        req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
        rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
        if (!rc)
                *fw_ring_id = le16_to_cpu(resp.ring_id);

        return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
                                      u32 fw_stats_ctx_id)
{
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_stat_ctx_free_input req = {0};
        struct bnxt_fw_msg fw_msg;
        int rc = -EINVAL;

        if (!en_dev)
                return rc;

        memset(&fw_msg, 0, sizeof(fw_msg));

        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
        req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
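        /* No separate output struct here: the request buffer doubles as the
         * response buffer, which appears safe because the stat_ctx_free
         * response is smaller than the request.
         */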
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
                            sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
        rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW stats context %#x", rc);

        return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                                       dma_addr_t dma_map,
                                       u32 *fw_stats_ctx_id)
{
        struct hwrm_stat_ctx_alloc_output resp = {0};
        struct hwrm_stat_ctx_alloc_input req = {0};
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct bnxt_fw_msg fw_msg;
        int rc = -EINVAL;

        *fw_stats_ctx_id = INVALID_STATS_CTX_ID;

        if (!en_dev)
                return rc;

        memset(&fw_msg, 0, sizeof(fw_msg));

        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
        req.stats_dma_addr = cpu_to_le64(dma_map);
        req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
        rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
        if (!rc)
                *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

        return rc;
}

/* Device */

static bool is_bnxt_re_dev(struct net_device *netdev)
{
        struct ethtool_drvinfo drvinfo;

        if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
                memset(&drvinfo, 0, sizeof(drvinfo));
                netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

                if (strcmp(drvinfo.driver, "bnxt_en"))
                        return false;
                return true;
        }
        return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
        struct bnxt_re_dev *rdev;

        rcu_read_lock();
        list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
                if (rdev->netdev == netdev) {
                        rcu_read_unlock();
                        return rdev;
                }
        }
        rcu_read_unlock();
        return NULL;
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
                                struct bnxt_en_dev *en_dev)
{
        dev_put(netdev);
        module_put(en_dev->pdev->driver->driver.owner);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
        struct bnxt *bp = netdev_priv(netdev);
        struct bnxt_en_dev *en_dev;
        struct pci_dev *pdev;

        /* Call bnxt_en's RoCE probe via indirect API */
        if (!bp->ulp_probe)
                return ERR_PTR(-EINVAL);

        en_dev = bp->ulp_probe(netdev);
        if (IS_ERR(en_dev))
                return en_dev;

        pdev = en_dev->pdev;
        if (!pdev)
                return ERR_PTR(-EINVAL);

        if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
                dev_info(&pdev->dev,
                        "%s: probe error: RoCE is not supported on this device",
                        ROCE_DRV_MODULE_NAME);
                return ERR_PTR(-ENODEV);
        }

        /* Bump the reference counts on the module and the net device */
        if (!try_module_get(pdev->driver->driver.owner))
                return ERR_PTR(-ENODEV);

        dev_hold(netdev);

        return en_dev;
}

static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
        ib_unregister_device(&rdev->ibdev);
}

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
        struct ib_device *ibdev = &rdev->ibdev;

        /* ib device init */
        ibdev->owner = THIS_MODULE;
        ibdev->node_type = RDMA_NODE_IB_CA;
        strlcpy(ibdev->name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
        strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
                strlen(BNXT_RE_DESC) + 5);
        ibdev->phys_port_cnt = 1;

        bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

        ibdev->num_comp_vectors = 1;
        ibdev->dev.parent = &rdev->en_dev->pdev->dev;
        ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

        /* User space */
        ibdev->uverbs_abi_ver = BNXT_RE_ABI_VERSION;
        ibdev->uverbs_cmd_mask =
                        (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                        (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                        (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                        (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                        (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                        (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                        (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
                        (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                        (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                        (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                        (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                        (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                        (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                        (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_AH)           |
                        (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
        /* POLL_CQ and REQ_NOTIFY_CQ are directly handled in libbnxt_re */

        /* Kernel verbs */
        ibdev->query_device             = bnxt_re_query_device;
        ibdev->modify_device            = bnxt_re_modify_device;

        ibdev->query_port               = bnxt_re_query_port;
        ibdev->get_port_immutable       = bnxt_re_get_port_immutable;
        ibdev->get_dev_fw_str           = bnxt_re_query_fw_str;
        ibdev->query_pkey               = bnxt_re_query_pkey;
        ibdev->get_netdev               = bnxt_re_get_netdev;
        ibdev->add_gid                  = bnxt_re_add_gid;
        ibdev->del_gid                  = bnxt_re_del_gid;
        ibdev->get_link_layer           = bnxt_re_get_link_layer;

        ibdev->alloc_pd                 = bnxt_re_alloc_pd;
        ibdev->dealloc_pd               = bnxt_re_dealloc_pd;

        ibdev->create_ah                = bnxt_re_create_ah;
        ibdev->modify_ah                = bnxt_re_modify_ah;
        ibdev->query_ah                 = bnxt_re_query_ah;
        ibdev->destroy_ah               = bnxt_re_destroy_ah;

        ibdev->create_srq               = bnxt_re_create_srq;
        ibdev->modify_srq               = bnxt_re_modify_srq;
        ibdev->query_srq                = bnxt_re_query_srq;
        ibdev->destroy_srq              = bnxt_re_destroy_srq;
        ibdev->post_srq_recv            = bnxt_re_post_srq_recv;

        ibdev->create_qp                = bnxt_re_create_qp;
        ibdev->modify_qp                = bnxt_re_modify_qp;
        ibdev->query_qp                 = bnxt_re_query_qp;
        ibdev->destroy_qp               = bnxt_re_destroy_qp;

        ibdev->post_send                = bnxt_re_post_send;
        ibdev->post_recv                = bnxt_re_post_recv;

        ibdev->create_cq                = bnxt_re_create_cq;
        ibdev->destroy_cq               = bnxt_re_destroy_cq;
        ibdev->poll_cq                  = bnxt_re_poll_cq;
        ibdev->req_notify_cq            = bnxt_re_req_notify_cq;

        ibdev->get_dma_mr               = bnxt_re_get_dma_mr;
        ibdev->dereg_mr                 = bnxt_re_dereg_mr;
        ibdev->alloc_mr                 = bnxt_re_alloc_mr;
        ibdev->map_mr_sg                = bnxt_re_map_mr_sg;

        ibdev->reg_user_mr              = bnxt_re_reg_user_mr;
        ibdev->alloc_ucontext           = bnxt_re_alloc_ucontext;
        ibdev->dealloc_ucontext         = bnxt_re_dealloc_ucontext;
        ibdev->mmap                     = bnxt_re_mmap;
        ibdev->get_hw_stats             = bnxt_re_ib_get_hw_stats;
        ibdev->alloc_hw_stats           = bnxt_re_ib_alloc_hw_stats;

        ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
        return ib_register_device(ibdev, NULL);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

        return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}

static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);

static struct device_attribute *bnxt_re_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_hca_type
};

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
        dev_put(rdev->netdev);
        rdev->netdev = NULL;

        mutex_lock(&bnxt_re_dev_lock);
        list_del_rcu(&rdev->list);
        mutex_unlock(&bnxt_re_dev_lock);

        synchronize_rcu();

        ib_dealloc_device(&rdev->ibdev);
        /* rdev is gone */
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
                                           struct bnxt_en_dev *en_dev)
{
        struct bnxt_re_dev *rdev;

        /* Allocate bnxt_re_dev instance here */
        rdev = (struct bnxt_re_dev *)ib_alloc_device(sizeof(*rdev));
        if (!rdev) {
                dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
                        ROCE_DRV_MODULE_NAME);
                return NULL;
        }
        /* Default values */
        rdev->netdev = netdev;
        dev_hold(rdev->netdev);
        rdev->en_dev = en_dev;
        rdev->id = rdev->en_dev->pdev->devfn;
        INIT_LIST_HEAD(&rdev->qp_list);
        mutex_init(&rdev->qp_lock);
        atomic_set(&rdev->qp_count, 0);
        atomic_set(&rdev->cq_count, 0);
        atomic_set(&rdev->srq_count, 0);
        atomic_set(&rdev->mr_count, 0);
        atomic_set(&rdev->mw_count, 0);
        rdev->cosq[0] = 0xFFFF;
        rdev->cosq[1] = 0xFFFF;

        mutex_lock(&bnxt_re_dev_lock);
        list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
        mutex_unlock(&bnxt_re_dev_lock);
        return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
                                             *unaffi_async)
{
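        /* Unaffiliated firmware error events are only acknowledged here;
         * no recovery action is taken for any of them.
         */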
        switch (unaffi_async->event) {
        case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
        case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
        case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
        case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
        case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
        case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
        case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
        case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
        case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
        case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
        case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
                                         struct bnxt_re_qp *qp)
{
        struct ib_event event;
        unsigned int flags;

        if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
            rdma_is_kernel_res(&qp->ib_qp.res)) {
                flags = bnxt_re_lock_cqs(qp);
                bnxt_qplib_add_flush_qp(&qp->qplib_qp);
                bnxt_re_unlock_cqs(qp, flags);
        }

        memset(&event, 0, sizeof(event));
        if (qp->qplib_qp.srq) {
                event.device = &qp->rdev->ibdev;
                event.element.qp = &qp->ib_qp;
                event.event = IB_EVENT_QP_LAST_WQE_REACHED;
        }

        if (event.device && qp->ib_qp.event_handler)
                qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

        return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
                                           void *obj)
{
        int rc = 0;
        u8 event;

        if (!obj)
                return rc; /* QP was already dead, still return success */

        event = affi_async->event;
        if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
                struct bnxt_qplib_qp *lib_qp = obj;
                struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
                                                     qplib_qp);
                rc = bnxt_re_handle_qp_async_event(affi_async, qp);
        }
        return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
                               void *aeqe, void *obj)
{
        struct creq_qp_event *affi_async;
        struct creq_func_event *unaffi_async;
        u8 type;
        int rc;

        type = ((struct creq_base *)aeqe)->type;
        if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
                unaffi_async = aeqe;
                rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
        } else {
                affi_async = aeqe;
                rc = bnxt_re_handle_affi_async_event(affi_async, obj);
        }

        return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
                                struct bnxt_qplib_srq *handle, u8 event)
{
        struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
                                               qplib_srq);
        struct ib_event ib_event;
        int rc = 0;

        if (!srq) {
                dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
                        ROCE_DRV_MODULE_NAME);
                rc = -EINVAL;
                goto done;
        }
        ib_event.device = &srq->rdev->ibdev;
        ib_event.element.srq = &srq->ib_srq;
        if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
                ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
        else
                ib_event.event = IB_EVENT_SRQ_ERR;

        if (srq->ib_srq.event_handler) {
                /* Lock event_handler? */
                (*srq->ib_srq.event_handler)(&ib_event,
                                             srq->ib_srq.srq_context);
        }
done:
        return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
                               struct bnxt_qplib_cq *handle)
{
        struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
                                             qplib_cq);

        if (!cq) {
                dev_err(NULL, "%s: CQ is NULL, CQN not handled",
                        ROCE_DRV_MODULE_NAME);
                return -EINVAL;
        }
        if (cq->ib_cq.comp_handler) {
                /* Lock comp_handler? */
                (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
        }

        return 0;
}

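/* MSI-X vector 0 (BNXT_RE_AEQ_IDX) is reserved for the RCFW/CREQ channel,
 * so the NQs ride on vectors 1..num_msix-1 and nq[i] maps to vector i + 1;
 * that is why the loops below run from 1 and index nq[i - 1].
 */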
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
        int i;

        for (i = 1; i < rdev->num_msix; i++)
                bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

        if (rdev->qplib_res.rcfw)
                bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
        int rc = 0, i;
        int num_vec_enabled = 0;

        bnxt_qplib_init_res(&rdev->qplib_res);

        for (i = 1; i < rdev->num_msix; i++) {
                rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
                                          i - 1, rdev->msix_entries[i].vector,
                                          rdev->msix_entries[i].db_offset,
                                          &bnxt_re_cqn_handler,
                                          &bnxt_re_srqn_handler);

                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to enable NQ with rc = 0x%x", rc);
                        goto fail;
                }
                num_vec_enabled++;
        }
        return 0;
fail:
        /* Only indices 0..num_vec_enabled - 1 were enabled; unwind those */
        for (i = num_vec_enabled - 1; i >= 0; i--)
                bnxt_qplib_disable_nq(&rdev->nq[i]);

        return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
        int i;

        for (i = 0; i < rdev->num_msix - 1; i++) {
                bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
        bnxt_re_free_nq_res(rdev);

        if (rdev->qplib_res.dpi_tbl.max) {
                bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
                                       &rdev->qplib_res.dpi_tbl,
                                       &rdev->dpi_privileged);
        }
        if (rdev->qplib_res.rcfw) {
                bnxt_qplib_free_res(&rdev->qplib_res);
                rdev->qplib_res.rcfw = NULL;
        }
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
        int rc = 0, i;
        int num_vec_created = 0;

        /* Configure and allocate resources for qplib */
        rdev->qplib_res.rcfw = &rdev->rcfw;
        rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
                                     rdev->is_virtfn);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
                                  rdev->netdev, &rdev->dev_attr);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
                                  &rdev->dpi_privileged,
                                  rdev);
        if (rc)
                goto dealloc_res;

        for (i = 0; i < rdev->num_msix - 1; i++) {
                rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
                        BNXT_RE_MAX_SRQC_COUNT + 2;
                rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
                                i, rc);
                        goto free_nq;
                }
                rc = bnxt_re_net_ring_alloc
                        (rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
                         rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
                         HWRM_RING_ALLOC_CMPL,
                         BNXT_QPLIB_NQE_MAX_CNT - 1,
                         rdev->msix_entries[i + 1].ring_idx,
                         &rdev->nq[i].ring_id);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to allocate NQ fw id with rc = 0x%x",
                                rc);
                        bnxt_qplib_free_nq(&rdev->nq[i]);
                        goto free_nq;
                }
                num_vec_created++;
        }
        return 0;
free_nq:
        /* Only indices 0..num_vec_created - 1 hold a fw ring and an NQ */
        for (i = num_vec_created - 1; i >= 0; i--) {
                bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
        bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
                               &rdev->qplib_res.dpi_tbl,
                               &rdev->dpi_privileged);
dealloc_res:
        bnxt_qplib_free_res(&rdev->qplib_res);

fail:
        rdev->qplib_res.rcfw = NULL;
        return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
                                   u8 port_num, enum ib_event_type event)
{
        struct ib_event ib_event;

        ib_event.device = ibdev;
        if (qp) {
                ib_event.element.qp = qp;
                ib_event.event = event;
                if (qp->event_handler)
                        qp->event_handler(&ib_event, qp->qp_context);

        } else {
                ib_event.element.port_num = port_num;
                ib_event.event = event;
                ib_dispatch_event(&ib_event);
        }
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN      0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
                                      u64 *cid_map)
{
        struct hwrm_queue_pri2cos_qcfg_input req = {0};
        struct bnxt *bp = netdev_priv(rdev->netdev);
        struct hwrm_queue_pri2cos_qcfg_output resp;
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct bnxt_fw_msg fw_msg;
        u32 flags = 0;
        u8 *qcfgmap, *tmp_map;
        int rc = 0, i;

        if (!cid_map)
                return -EINVAL;

        memset(&fw_msg, 0, sizeof(fw_msg));
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
                              HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
        flags |= (dir & 0x01);
        flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
        req.flags = cpu_to_le32(flags);
        req.port_id = bp->pf.port_id;

        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
        rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
        if (rc)
                return rc;

        if (resp.queue_cfg_info)
                dev_warn(rdev_to_dev(rdev),
                         "Asymmetric cos queue configuration detected on device, QoS may not be fully functional\n");
        qcfgmap = &resp.pri0_cos_queue_id;
        tmp_map = (u8 *)cid_map;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                tmp_map[i] = qcfgmap[i];

        return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
                                        struct bnxt_re_qp *qp)
{
        return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
        int mask = IB_QP_STATE;
        struct ib_qp_attr qp_attr;
        struct bnxt_re_qp *qp;

        qp_attr.qp_state = IB_QPS_ERR;
        mutex_lock(&rdev->qp_lock);
        list_for_each_entry(qp, &rdev->qp_list, list) {
                /* Modify the state of all QPs except QP1/Shadow QP */
                if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
                        if (qp->qplib_qp.state !=
                            CMDQ_MODIFY_QP_NEW_STATE_RESET &&
                            qp->qplib_qp.state !=
                            CMDQ_MODIFY_QP_NEW_STATE_ERR) {
                                bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
                                                       1, IB_EVENT_QP_FATAL);
                                bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
                                                  NULL);
                        }
                }
        }
        mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
        struct bnxt_qplib_gid gid;
        u16 gid_idx, index;
        int rc = 0;

        if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
                return 0;

        if (!sgid_tbl) {
                dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
                return -EINVAL;
        }

        for (index = 0; index < sgid_tbl->active; index++) {
                gid_idx = sgid_tbl->hw_id[index];

                if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
                            sizeof(bnxt_qplib_gid_zero)))
                        continue;
                /* Only the VLAN enable setting of non-VLAN GIDs needs to be
                 * modified here, as the setting is done for VLAN GIDs while
                 * adding the GID.
                 */
                if (sgid_tbl->vlan[index])
                        continue;

                memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

                rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
                                            rdev->qplib_res.netdev->dev_addr);
        }

        return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
        u32 prio_map = 0, tmp_map = 0;
        struct net_device *netdev;
        struct dcb_app app;

        netdev = rdev->netdev;

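        /* Query the DCB app table for both RoCE v1 (IBoE ethertype 0x8915)
         * and RoCE v2 (UDP destination port 4791) entries and merge the
         * resulting priority masks.
         */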
        memset(&app, 0, sizeof(app));
        app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
        app.protocol = ETH_P_IBOE;
        tmp_map = dcb_ieee_getapp_mask(netdev, &app);
        prio_map = tmp_map;

        app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
        app.protocol = ROCE_V2_UDP_DPORT;
        tmp_map = dcb_ieee_getapp_mask(netdev, &app);
        prio_map |= tmp_map;

        return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
        u16 prio;
        u8 id;

        for (prio = 0, id = 0; prio < 8; prio++) {
                if (prio_map & (1 << prio)) {
                        cosq[id] = cid_map[prio];
                        id++;
                        if (id == 2) /* Max 2 tcs supported */
                                break;
                }
        }
}
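
/* For illustration: with prio_map = 0x28 (priorities 3 and 5 set), the loop
 * above picks cosq[0] = cid_map[3] and cosq[1] = cid_map[5] and then stops,
 * since at most two traffic classes are supported.
 */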

static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
        u8 prio_map = 0;
        u64 cid_map;
        int rc;

        /* Get priority for RoCE */
        prio_map = bnxt_re_get_priority_mask(rdev);

        if (prio_map == rdev->cur_prio_map)
                return 0;
        rdev->cur_prio_map = prio_map;
        /* Get cosq id for this priority */
        rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
        if (rc) {
                dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
                return rc;
        }
        /* Parse CoS IDs for app priority */
        bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

        /* Config BONO. */
        rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
        if (rc) {
                dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
                         rdev->cosq[0], rdev->cosq[1]);
                return rc;
        }

        /* Actual priorities are not programmed as they are already
         * done by L2 driver; just enable or disable priority vlan tagging
         */
        if ((prio_map == 0 && rdev->qplib_res.prio) ||
            (prio_map != 0 && !rdev->qplib_res.prio)) {
                rdev->qplib_res.prio = prio_map ? true : false;

                bnxt_re_update_gid(rdev);
        }

        return 0;
}

static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
        int i, rc;

        if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
                for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
                        device_remove_file(&rdev->ibdev.dev,
                                           bnxt_re_attributes[i]);
                /* Cleanup ib dev */
                bnxt_re_unregister_ib(rdev);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
                cancel_delayed_work(&rdev->worker);

        if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
                               &rdev->flags))
                bnxt_re_cleanup_res(rdev);
        if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
                bnxt_re_free_res(rdev);

        if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
                rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to deinitialize RCFW: %#x", rc);
                bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
                bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
                bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
                bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
                bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
                rc = bnxt_re_free_msix(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to free MSI-X vectors: %#x", rc);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
                rc = bnxt_re_unregister_netdev(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to unregister with netdev: %#x", rc);
        }
}

/* Worker thread for polling periodic events. Currently used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
        struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
                                                worker.work);

        bnxt_re_setup_qos(rdev);
        schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
        int i, j, rc;
        bool locked;

        /* Acquire rtnl lock throughout this function */
        rtnl_lock();
        locked = true;

        /* Register a new RoCE device instance with netdev */
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
                rtnl_unlock();
                pr_err("Failed to register with netdev: %#x\n", rc);
                return -EINVAL;
        }
        set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

        /* Check whether VF or PF */
        bnxt_re_get_sriov_func_type(rdev);

        rc = bnxt_re_request_msix(rdev);
        if (rc) {
                pr_err("Failed to get MSI-X vectors: %#x\n", rc);
                rc = -EINVAL;
                goto fail;
        }
        set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

        /* Establish RCFW Communication Channel to initialize the context
         * memory for the function and all child VFs
         */
        rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
                                           BNXT_RE_MAX_QPC_COUNT);
        if (rc) {
                pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
                goto fail;
        }
        rc = bnxt_re_net_ring_alloc
                        (rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
                         rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
                         HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_CREQE_MAX_CNT - 1,
                         rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx,
                         &rdev->rcfw.creq_ring_id);
        if (rc) {
                pr_err("Failed to allocate CREQ: %#x\n", rc);
                goto free_rcfw;
        }
        rc = bnxt_qplib_enable_rcfw_channel
                                (rdev->en_dev->pdev, &rdev->rcfw,
                                 rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
                                 rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
                                 rdev->is_virtfn, &bnxt_re_aeq_handler);
        if (rc) {
                pr_err("Failed to enable RCFW channel: %#x\n", rc);
                goto free_ring;
        }

        rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
                                     rdev->is_virtfn);
        if (rc)
                goto disable_rcfw;
        if (!rdev->is_virtfn)
                bnxt_re_set_resource_limits(rdev);

        rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
        if (rc) {
                pr_err("Failed to allocate QPLIB context: %#x\n", rc);
                goto disable_rcfw;
        }
        rc = bnxt_re_net_stats_ctx_alloc(rdev,
                                         rdev->qplib_ctx.stats.dma_map,
                                         &rdev->qplib_ctx.stats.fw_id);
        if (rc) {
                pr_err("Failed to allocate stats context: %#x\n", rc);
                goto free_ctx;
        }

        rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
                                  rdev->is_virtfn);
        if (rc) {
                pr_err("Failed to initialize RCFW: %#x\n", rc);
                goto free_sctx;
        }
        set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

        /* Resources based on the 'new' device caps */
        rc = bnxt_re_alloc_res(rdev);
        if (rc) {
                pr_err("Failed to allocate resources: %#x\n", rc);
                goto fail;
        }
        set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
        rc = bnxt_re_init_res(rdev);
        if (rc) {
                pr_err("Failed to initialize resources: %#x\n", rc);
                goto fail;
        }

        set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

        if (!rdev->is_virtfn) {
                rc = bnxt_re_setup_qos(rdev);
                if (rc)
                        pr_info("RoCE priority not yet configured\n");

                INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
                set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
                schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
        }

        rtnl_unlock();
        locked = false;

        /* Register ib dev */
        rc = bnxt_re_register_ib(rdev);
        if (rc) {
                pr_err("Failed to register with IB: %#x\n", rc);
                goto fail;
        }
        set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        dev_info(rdev_to_dev(rdev), "Device registered successfully");
        for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
                rc = device_create_file(&rdev->ibdev.dev,
                                        bnxt_re_attributes[i]);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create IB sysfs: %#x", rc);
                        /* Must clean up all created device files */
                        for (j = 0; j < i; j++)
                                device_remove_file(&rdev->ibdev.dev,
                                                   bnxt_re_attributes[j]);
                        bnxt_re_unregister_ib(rdev);
                        goto fail;
                }
        }
        ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
                         &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
        bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
        bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);

        return 0;
free_sctx:
        bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
        bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
        bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
        bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
free_rcfw:
        bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
        if (!locked)
                rtnl_lock();
        bnxt_re_ib_unreg(rdev);
        rtnl_unlock();

        return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}
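
/*
 * bnxt_re_dev_probe() above returns either a live pointer or a negative
 * errno encoded into the pointer with ERR_PTR().  A small sketch of
 * producing and consuming such a return value; sketch_errptr_payload
 * and sketch_errptr_probe() are hypothetical, and <linux/err.h> is
 * assumed to be reachable through the existing includes.
 */
static int sketch_errptr_payload;

static void *sketch_errptr_probe(int err)
{
	/* Encode a negative errno in the pointer, or hand back payload */
	return err ? ERR_PTR(err) : &sketch_errptr_payload;
}

static int __maybe_unused sketch_errptr_consume(void)
{
	void *p = sketch_errptr_probe(-ENODEV);

	if (IS_ERR(p))			/* top-of-range address => error */
		return PTR_ERR(p);	/* decode back to the errno */
	return 0;
}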

/* Drop the PCI device reference taken in bnxt_re_init_one() */
static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
{
	pci_dev_put(rdev->en_dev->pdev);
}

/* Handle the tasks deferred from the netdev notifier */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	/*
	 * Ignore events that arrive before the IB device is registered,
	 * but still balance the sched_count taken when this work was
	 * queued and free the work item rather than leaking it.
	 */
	if (re_work->event != NETDEV_REGISTER &&
	    !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		smp_mb__before_atomic();
		atomic_dec(&rdev->sched_count);
		goto exit;
	}

	switch (re_work->event) {
	case NETDEV_REGISTER:
		rc = bnxt_re_ib_reg(rdev);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to register with IB: %#x", rc);
			bnxt_re_remove_one(rdev);
			bnxt_re_dev_unreg(rdev);
			goto exit;
		}
		break;
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	smp_mb__before_atomic();
	atomic_dec(&rdev->sched_count);
exit:
	kfree(re_work);
}
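
/*
 * bnxt_re_task() recovers its containing bnxt_re_work from the embedded
 * work_struct via container_of(), and the handler owns (and must free)
 * the allocation made at queue time.  A minimal sketch of that embed/
 * recover round trip; struct sketch_work and sketch_work_fn() are
 * hypothetical, assuming <linux/workqueue.h> and <linux/slab.h> are
 * available via the existing includes.
 */
struct sketch_work {
	struct work_struct work;	/* embedded, not a pointer */
	unsigned long event;
};

static void __maybe_unused sketch_work_fn(struct work_struct *work)
{
	struct sketch_work *sw = container_of(work, struct sketch_work, work);

	pr_debug("deferred event %lu\n", sw->event);
	kfree(sw);			/* handler owns the allocation */
}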

/* Take a PCI device reference; paired with bnxt_re_remove_one() */
static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
{
	pci_dev_get(rdev->en_dev->pdev);
}
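
/*
 * bnxt_re_init_one() and bnxt_re_remove_one() form a strict reference
 * pair: pci_dev_get() pins the PCI device for the lifetime of the RoCE
 * device and pci_dev_put() drops that pin.  A hedged sketch of the
 * pairing rule; sketch_pci_pinned_use() is illustrative only.
 */
static void __maybe_unused sketch_pci_pinned_use(struct pci_dev *pdev)
{
	struct pci_dev *pinned = pci_dev_get(pdev);	/* refcount++ */

	/* ... the device cannot be released while we hold the pin ... */

	pci_dev_put(pinned);				/* refcount--, must pair */
}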

/*
 * Per the notifier documentation, "notifier chain callbacks can be
 * invoked for the same chain from different CPUs at the same time".
 *
 * When the netdev already exists, our register_netdevice_notifier()
 * call holds the rtnl_lock() while replaying NETDEV_REGISTER and (if
 * the device is up) NETDEV_UP to us, so those callbacks are serialized.
 *
 * When the netdev appears later, however, the notifier chain may be
 * invoked from different CPUs simultaneously; that case is serialized
 * by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		goto exit;
	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_dev_reg(&rdev, real_dev);
		if (rc == -ENODEV)
			break;
		if (rc) {
			pr_err("Failed to register with the device %s: %#x\n",
			       real_dev->name, rc);
			break;
		}
		bnxt_re_init_one(rdev);
		sch_work = true;
		break;

	case NETDEV_UNREGISTER:
		/* The netdev notifier will deliver NETDEV_UNREGISTER again
		 * later, because we still hold a reference to the netdev,
		 * so defer teardown while tasks are still scheduled.
		 */
		if (atomic_read(&rdev->sched_count) > 0)
			goto exit;
		bnxt_re_ib_unreg(rdev);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			atomic_inc(&rdev->sched_count);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	return NOTIFY_DONE;
}
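
/*
 * Note: every path above returns NOTIFY_DONE, so the remaining
 * subscribers on the netdev notifier chain still see the event;
 * returning NOTIFY_STOP would end chain traversal early.
 */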

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register netdevice notifier\n",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}
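
/*
 * create_singlethread_workqueue() gives bnxt_re an ordered queue, which
 * is what lets bnxt_re_mod_exit() below rely on flush_workqueue() to
 * drain tasks in submission order.  On current kernels it wraps
 * alloc_ordered_workqueue(); a hedged sketch of the roughly equivalent
 * direct call (sketch_make_wq() and its queue name are illustrative):
 */
static struct workqueue_struct * __maybe_unused sketch_make_wq(void)
{
	/* WQ_MEM_RECLAIM mirrors what the legacy wrapper requests */
	return alloc_ordered_workqueue("bnxt_re_sketch", WQ_MEM_RECLAIM);
}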

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev, *next;
	LIST_HEAD(to_be_deleted);

	mutex_lock(&bnxt_re_dev_lock);
	/* Free all adapter allocated resources */
	if (!list_empty(&bnxt_re_dev_list))
		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
	mutex_unlock(&bnxt_re_dev_lock);
	/*
	 * Cleanup the devices in reverse order so that the VF device
	 * cleanup is done before PF cleanup
	 */
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
		/*
		 * Flush out any scheduled tasks before destroying the
		 * resources
		 */
		flush_workqueue(bnxt_re_wq);
		bnxt_re_dev_stop(rdev);
		/* Acquire the rtnl_lock as the L2 resources are freed here */
		rtnl_lock();
		bnxt_re_ib_unreg(rdev);
		rtnl_unlock();
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
	}
	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
}
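
/*
 * bnxt_re_mod_exit() above empties the global device list under the
 * mutex with list_splice_init() and then walks its private copy in
 * reverse, so VFs are torn down before their parent PF.  A minimal
 * sketch of that drain idiom; struct sketch_node, sketch_list and
 * sketch_lock are hypothetical stand-ins.
 */
struct sketch_node {
	struct list_head list;
};

static LIST_HEAD(sketch_list);
static DEFINE_MUTEX(sketch_lock);

static void __maybe_unused sketch_drain(void)
{
	struct sketch_node *n, *next;
	LIST_HEAD(local);

	mutex_lock(&sketch_lock);
	list_splice_init(&sketch_list, &local);	/* global list is now empty */
	mutex_unlock(&sketch_lock);

	/* newest-first walk; the _safe variant allows freeing as we go */
	list_for_each_entry_safe_reverse(n, next, &local, list)
		kfree(n);
}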

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);