/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
#include "usnic_ib_verbs.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

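/*
 * Pack the leading sizeof(u64) bytes of the ethtool firmware version
 * string into the u64 fw_ver field that struct ib_device_attr expects.
 * The caller passes info.fw_version, whose 32-byte size keeps the read
 * in bounds.
 */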
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = *((u64 *)fw_ver_str);
}

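/*
 * Fill the create_qp response copied back to userspace: the VF index,
 * BAR0 bus address and length, the vNIC indices of the RQ/WQ/CQ
 * resources backing this QP group, and the transport type of the
 * default flow.  The userspace provider uses these to map and drive
 * the device directly.
 */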
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}

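/*
 * Pick a VF with enough free vNIC resources and create a QP group on
 * it.  With usnic_ib_share_vf set, VFs already in use by this PD are
 * tried first; otherwise only a completely unused VF qualifies.
 * Caller must hold usdev_lock.
 */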
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in the pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		if (IS_ERR(dev_list))
			return ERR_CAST(dev_list);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);

				spin_unlock(&vf->lock);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
		dev_list = NULL;
	}

	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
		    usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);

			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		if (usnic_ib_share_vf)
			usnic_uiom_free_dev_list(dev_list);
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}

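/* Tear down a QP group under its VF's lock; the group must be in RESET. */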
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

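/*
 * The only user-controlled field checked here is the transport type,
 * which must lie strictly between USNIC_TRANSPORT_UNKNOWN and
 * USNIC_TRANSPORT_MAX.
 */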
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

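/*
 * Report device attributes.  QP and CQ maxima scale with the number of
 * VFs bound to the device; limits that the userspace provider owns
 * (max_qp_wr, max_sge, max_sge_rd, max_cqe) are left at zero.
 */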
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/*
	 * max_qp_wr, max_sge, max_sge_rd and max_cqe are owned by
	 * userspace.
	 */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

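/*
 * Report port attributes.  Speed and width come from the underlying
 * netdev via ib_get_eth_speed(); the port state is derived from link
 * state and whether the interface has an IP address assigned.
 */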
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
			     &props->active_width))
		return -EINVAL;

	/*
	 * usdev_lock is acquired after (and not before) the ib_get_eth_speed
	 * call because taking rtnl_lock inside ib_get_eth_speed while holding
	 * usdev_lock could lead to a deadlock.
	 */
	mutex_lock(&us_ibdev->usdev_lock);
	/* props is zeroed by the caller, so avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

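	/*
	 * Physical port state follows the IB spec encoding:
	 * 3 = Disabled, 4 = Port Configuration Training, 5 = Link Up.
	 */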
	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

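/*
 * Only the current QP state (plus a zero qkey for UD QPs) is reported;
 * all other attr fields are cleared.
 */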
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&gid->raw[0], 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(device);

	if (us_ibdev->netdev)
		dev_hold(us_ibdev->netdev);

	return us_ibdev->netdev;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}

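/*
 * Create a UD QP backed by VF resources.  The transport spec arrives in
 * udata from the userspace provider; the minimal resource spec for that
 * transport is bumped to two CQs when send and receive completions use
 * separate CQs.  On success the new QP group is linked into the owning
 * ucontext.
 */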
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

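/* Move the QP group to RESET, unlink it from its ucontext and free it. */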
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

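/*
 * Only IB_QP_STATE transitions are supported, and usnic exposes a
 * single port, so any other port number in IB_QP_PORT is rejected.
 */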
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

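/*
 * Completion queues are managed entirely in userspace, so the kernel
 * object is only a placeholder that lets the verbs layer track it.
 */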
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-EBUSY);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

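/*
 * Register a userspace memory region by pinning its pages through the
 * usnic uiom layer.  No lkey/rkey protection is implemented; both keys
 * are fixed at zero.
 */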
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
	kfree(mr);
	return 0;
}

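/*
 * Allocate a per-process ucontext and link it into the device's context
 * list so its QP groups can be looked up later (e.g. at mmap time).
 */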
struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
							struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}

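/*
 * Map BAR0 of one of this context's VFs into userspace.  The VF is
 * selected by passing its index as the mmap page offset; the request
 * must cover exactly the whole BAR and the context must own a QP group
 * on that VF.
 */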
int usnic_ib_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
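/*
 * usNIC is an OS-bypass device: the data path (posting work requests,
 * polling completions, address handling) lives in the userspace
 * provider, so the corresponding kernel verbs are stubs that only fail.
 */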
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */