GNU Linux-libre 4.4.288-gnu1
[releases.git] / drivers/infiniband/hw/mlx4/main.c
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
42 #include <net/ipv6.h>
43 #include <net/addrconf.h>
44
45 #include <rdma/ib_smi.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_addr.h>
48 #include <rdma/ib_cache.h>
49
50 #include <net/bonding.h>
51
52 #include <linux/mlx4/driver.h>
53 #include <linux/mlx4/cmd.h>
54 #include <linux/mlx4/qp.h>
55
56 #include "mlx4_ib.h"
57 #include "user.h"
58
59 #define DRV_NAME        MLX4_IB_DRV_NAME
60 #define DRV_VERSION     "2.2-1"
61 #define DRV_RELDATE     "Feb 2014"
62
63 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
64 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
65 #define MLX4_IB_CARD_REV_A0   0xA0
66
67 MODULE_AUTHOR("Roland Dreier");
68 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
69 MODULE_LICENSE("Dual BSD/GPL");
70 MODULE_VERSION(DRV_VERSION);
71
72 int mlx4_ib_sm_guid_assign = 0;
73 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
74 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
75
76 static const char mlx4_ib_version[] =
77         DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
78         DRV_VERSION " (" DRV_RELDATE ")\n";
79
80 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
81
82 static struct workqueue_struct *wq;
83
84 static void init_query_mad(struct ib_smp *mad)
85 {
86         mad->base_version  = 1;
87         mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
88         mad->class_version = 1;
89         mad->method        = IB_MGMT_METHOD_GET;
90 }
91
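/*
 * init_query_mad() only fills in the common header for a LID-routed
 * SubnGet() MAD; callers set attr_id (and attr_mod where needed) before
 * handing the MAD to mlx4_MAD_IFC(), as the query helpers below do:
 *
 *        init_query_mad(in_mad);
 *        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 *        in_mad->attr_mod = cpu_to_be32(port);
 *        err = mlx4_MAD_IFC(dev, MLX4_MAD_IFC_IGNORE_KEYS, port,
 *                           NULL, NULL, in_mad, out_mad);
 */
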
92 static int check_flow_steering_support(struct mlx4_dev *dev)
93 {
94         int eth_num_ports = 0;
95         int ib_num_ports = 0;
96
97         int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
98
99         if (dmfs) {
100                 int i;
101                 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
102                         eth_num_ports++;
103                 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
104                         ib_num_ports++;
105                 dmfs &= (!ib_num_ports ||
106                          (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
107                         (!eth_num_ports ||
108                          (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
109                 if (ib_num_ports && mlx4_is_mfunc(dev)) {
110                         pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
111                         dmfs = 0;
112                 }
113         }
114         return dmfs;
115 }
116
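/*
 * check_flow_steering_support() above reports device-managed flow steering
 * (DMFS) only when the device is in MLX4_STEERING_MODE_DEVICE_MANAGED, every
 * Ethernet port has MLX4_DEV_CAP_FLAG2_FS_EN, every IB port has
 * MLX4_DEV_CAP_FLAG2_DMFS_IPOIB, and the device is not multi-function
 * (SR-IOV) with IB ports.
 */
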
117 static int num_ib_ports(struct mlx4_dev *dev)
118 {
119         int ib_ports = 0;
120         int i;
121
122         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
123                 ib_ports++;
124
125         return ib_ports;
126 }
127
128 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
129 {
130         struct mlx4_ib_dev *ibdev = to_mdev(device);
131         struct net_device *dev;
132
133         rcu_read_lock();
134         dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
135
136         if (dev) {
137                 if (mlx4_is_bonded(ibdev->dev)) {
138                         struct net_device *upper = NULL;
139
140                         upper = netdev_master_upper_dev_get_rcu(dev);
141                         if (upper) {
142                                 struct net_device *active;
143
144                                 active = bond_option_active_slave_get_rcu(netdev_priv(upper));
145                                 if (active)
146                                         dev = active;
147                         }
148                 }
149         }
150         if (dev)
151                 dev_hold(dev);
152
153         rcu_read_unlock();
154         return dev;
155 }
156
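/*
 * mlx4_ib_get_netdev() returns the net_device with a reference held
 * (dev_hold() under rcu_read_lock()), so the caller must drop it.  A
 * minimal, hypothetical caller sketch (not part of this file):
 *
 *        struct net_device *ndev = mlx4_ib_get_netdev(&ibdev->ib_dev, port);
 *        if (ndev) {
 *                pr_debug("port %d is carried by %s\n", port, ndev->name);
 *                dev_put(ndev);
 *        }
 */
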
157 static int mlx4_ib_update_gids(struct gid_entry *gids,
158                                struct mlx4_ib_dev *ibdev,
159                                u8 port_num)
160 {
161         struct mlx4_cmd_mailbox *mailbox;
162         int err;
163         struct mlx4_dev *dev = ibdev->dev;
164         int i;
165         union ib_gid *gid_tbl;
166
167         mailbox = mlx4_alloc_cmd_mailbox(dev);
168         if (IS_ERR(mailbox))
169                 return -ENOMEM;
170
171         gid_tbl = mailbox->buf;
172
173         for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
174                 memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
175
176         err = mlx4_cmd(dev, mailbox->dma,
177                        MLX4_SET_PORT_GID_TABLE << 8 | port_num,
178                        1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
179                        MLX4_CMD_WRAPPED);
180         if (mlx4_is_bonded(dev))
181                 err += mlx4_cmd(dev, mailbox->dma,
182                                 MLX4_SET_PORT_GID_TABLE << 8 | 2,
183                                 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
184                                 MLX4_CMD_WRAPPED);
185
186         mlx4_free_cmd_mailbox(dev, mailbox);
187         return err;
188 }
189
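/*
 * In the SET_PORT command above, the in_modifier packs the table selector
 * in its high byte and the port number in its low byte
 * (MLX4_SET_PORT_GID_TABLE << 8 | port_num), and the mailbox carries the
 * whole GID table for that port.  When the ports are bonded, the same table
 * is also written to port 2 so both physical ports stay in sync.
 */
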
190 static int mlx4_ib_add_gid(struct ib_device *device,
191                            u8 port_num,
192                            unsigned int index,
193                            const union ib_gid *gid,
194                            const struct ib_gid_attr *attr,
195                            void **context)
196 {
197         struct mlx4_ib_dev *ibdev = to_mdev(device);
198         struct mlx4_ib_iboe *iboe = &ibdev->iboe;
199         struct mlx4_port_gid_table   *port_gid_table;
200         int free = -1, found = -1;
201         int ret = 0;
202         int hw_update = 0;
203         int i;
204         struct gid_entry *gids = NULL;
205
206         if (!rdma_cap_roce_gid_table(device, port_num))
207                 return -EINVAL;
208
209         if (port_num > MLX4_MAX_PORTS)
210                 return -EINVAL;
211
212         if (!context)
213                 return -EINVAL;
214
215         port_gid_table = &iboe->gids[port_num - 1];
216         spin_lock_bh(&iboe->lock);
217         for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
218                 if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid))) {
219                         found = i;
220                         break;
221                 }
222                 if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
223                         free = i; /* HW has space */
224         }
225
226         if (found < 0) {
227                 if (free < 0) {
228                         ret = -ENOSPC;
229                 } else {
230                         port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
231                         if (!port_gid_table->gids[free].ctx) {
232                                 ret = -ENOMEM;
233                         } else {
234                                 *context = port_gid_table->gids[free].ctx;
235                                 memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
236                                 port_gid_table->gids[free].ctx->real_index = free;
237                                 port_gid_table->gids[free].ctx->refcount = 1;
238                                 hw_update = 1;
239                         }
240                 }
241         } else {
242                 struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
243                 *context = ctx;
244                 ctx->refcount++;
245         }
246         if (!ret && hw_update) {
247                 gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
248                 if (!gids) {
249                         ret = -ENOMEM;
250                 } else {
251                         for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
252                                 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
253                 }
254         }
255         spin_unlock_bh(&iboe->lock);
256
257         if (!ret && hw_update) {
258                 ret = mlx4_ib_update_gids(gids, ibdev, port_num);
259                 kfree(gids);
260         }
261
262         return ret;
263 }
264
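/*
 * mlx4_ib_add_gid() above and mlx4_ib_del_gid() below maintain a software
 * shadow of the per-port HW GID table.  Duplicate adds of the same GID share
 * one slot through ctx->refcount; the hardware table is rewritten via
 * mlx4_ib_update_gids() only when a slot really changes, i.e. on the first
 * add or the last delete of a GID.  GFP_ATOMIC is used because the table is
 * manipulated under the iboe->lock spinlock.
 */
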
265 static int mlx4_ib_del_gid(struct ib_device *device,
266                            u8 port_num,
267                            unsigned int index,
268                            void **context)
269 {
270         struct gid_cache_context *ctx = *context;
271         struct mlx4_ib_dev *ibdev = to_mdev(device);
272         struct mlx4_ib_iboe *iboe = &ibdev->iboe;
273         struct mlx4_port_gid_table   *port_gid_table;
274         int ret = 0;
275         int hw_update = 0;
276         struct gid_entry *gids = NULL;
277
278         if (!rdma_cap_roce_gid_table(device, port_num))
279                 return -EINVAL;
280
281         if (port_num > MLX4_MAX_PORTS)
282                 return -EINVAL;
283
284         port_gid_table = &iboe->gids[port_num - 1];
285         spin_lock_bh(&iboe->lock);
286         if (ctx) {
287                 ctx->refcount--;
288                 if (!ctx->refcount) {
289                         unsigned int real_index = ctx->real_index;
290
291                         memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
292                         kfree(port_gid_table->gids[real_index].ctx);
293                         port_gid_table->gids[real_index].ctx = NULL;
294                         hw_update = 1;
295                 }
296         }
297         if (!ret && hw_update) {
298                 int i;
299
300                 gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
301                 if (!gids) {
302                         ret = -ENOMEM;
303                 } else {
304                         for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
305                                 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
306                 }
307         }
308         spin_unlock_bh(&iboe->lock);
309
310         if (!ret && hw_update) {
311                 ret = mlx4_ib_update_gids(gids, ibdev, port_num);
312                 kfree(gids);
313         }
314         return ret;
315 }
316
317 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
318                                     u8 port_num, int index)
319 {
320         struct mlx4_ib_iboe *iboe = &ibdev->iboe;
321         struct gid_cache_context *ctx = NULL;
322         union ib_gid gid;
323         struct mlx4_port_gid_table   *port_gid_table;
324         int real_index = -EINVAL;
325         int i;
326         int ret;
327         unsigned long flags;
328
329         if (port_num > MLX4_MAX_PORTS)
330                 return -EINVAL;
331
332         if (mlx4_is_bonded(ibdev->dev))
333                 port_num = 1;
334
335         if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
336                 return index;
337
338         ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, NULL);
339         if (ret)
340                 return ret;
341
342         if (!memcmp(&gid, &zgid, sizeof(gid)))
343                 return -EINVAL;
344
345         spin_lock_irqsave(&iboe->lock, flags);
346         port_gid_table = &iboe->gids[port_num - 1];
347
348         for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
349                 if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid))) {
350                         ctx = port_gid_table->gids[i].ctx;
351                         break;
352                 }
353         if (ctx)
354                 real_index = ctx->real_index;
355         spin_unlock_irqrestore(&iboe->lock, flags);
356         return real_index;
357 }
358
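/*
 * mlx4_ib_gid_index_to_real_index() translates an index in the ib_core GID
 * cache into the slot actually programmed in the HW table; the two can
 * diverge once entries have been added and removed.  Plain IB ports use
 * identical indexes, and on a bonded device only port 1's table is kept.
 */
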
359 static int mlx4_ib_query_device(struct ib_device *ibdev,
360                                 struct ib_device_attr *props,
361                                 struct ib_udata *uhw)
362 {
363         struct mlx4_ib_dev *dev = to_mdev(ibdev);
364         struct ib_smp *in_mad  = NULL;
365         struct ib_smp *out_mad = NULL;
366         int err = -ENOMEM;
367         int have_ib_ports;
368         struct mlx4_uverbs_ex_query_device cmd;
369         struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
370         struct mlx4_clock_params clock_params;
371
372         if (uhw->inlen) {
373                 if (uhw->inlen < sizeof(cmd))
374                         return -EINVAL;
375
376                 err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
377                 if (err)
378                         return err;
379
380                 if (cmd.comp_mask)
381                         return -EINVAL;
382
383                 if (cmd.reserved)
384                         return -EINVAL;
385         }
386
387         resp.response_length = offsetof(typeof(resp), response_length) +
388                 sizeof(resp.response_length);
389         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
390         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
391         if (!in_mad || !out_mad)
392                 goto out;
393
394         init_query_mad(in_mad);
395         in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
396
397         err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
398                            1, NULL, NULL, in_mad, out_mad);
399         if (err)
400                 goto out;
401
402         memset(props, 0, sizeof *props);
403
404         have_ib_ports = num_ib_ports(dev->dev);
405
406         props->fw_ver = dev->dev->caps.fw_ver;
407         props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
408                 IB_DEVICE_PORT_ACTIVE_EVENT             |
409                 IB_DEVICE_SYS_IMAGE_GUID                |
410                 IB_DEVICE_RC_RNR_NAK_GEN                |
411                 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
412         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
413                 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
414         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
415                 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
416         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
417                 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
418         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
419                 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
420         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
421                 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
422         if (dev->dev->caps.max_gso_sz &&
423             (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
424             (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
425                 props->device_cap_flags |= IB_DEVICE_UD_TSO;
426         if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
427                 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
428         if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
429             (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
430             (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
431                 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
432         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
433                 props->device_cap_flags |= IB_DEVICE_XRC;
434         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
435                 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
436         if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
437                 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
438                         props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
439                 else
440                         props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
441                 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
442                         props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
443         }
444
445         props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
446
447         props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
448                 0xffffff;
449         props->vendor_part_id      = dev->dev->persist->pdev->device;
450         props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
451         memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
452
453         props->max_mr_size         = ~0ull;
454         props->page_size_cap       = dev->dev->caps.page_size_cap;
455         props->max_qp              = dev->dev->quotas.qp;
456         props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
457         props->max_sge             = min(dev->dev->caps.max_sq_sg,
458                                          dev->dev->caps.max_rq_sg);
459         props->max_sge_rd          = MLX4_MAX_SGE_RD;
460         props->max_cq              = dev->dev->quotas.cq;
461         props->max_cqe             = dev->dev->caps.max_cqes;
462         props->max_mr              = dev->dev->quotas.mpt;
463         props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
464         props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
465         props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
466         props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
467         props->max_srq             = dev->dev->quotas.srq;
468         props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
469         props->max_srq_sge         = dev->dev->caps.max_srq_sge;
470         props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
471         props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
472         props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
473                 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
474         props->masked_atomic_cap   = props->atomic_cap;
475         props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
476         props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
477         props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
478         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
479                                            props->max_mcast_grp;
480         props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
481         props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
482         props->timestamp_mask = 0xFFFFFFFFFFFFULL;
483
484         if (!mlx4_is_slave(dev->dev))
485                 err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
486
487         if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
488                 resp.response_length += sizeof(resp.hca_core_clock_offset);
489                 if (!err && !mlx4_is_slave(dev->dev)) {
490                         resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
491                         resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
492                 }
493         }
494
495         if (uhw->outlen) {
496                 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
497                 if (err)
498                         goto out;
499         }
500 out:
501         kfree(in_mad);
502         kfree(out_mad);
503
504         return err;
505 }
506
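/*
 * mlx4_ib_query_device() uses the extensible uverbs response convention:
 * resp.response_length starts at the mandatory part and grows only when
 * uhw->outlen shows the user buffer can hold the next optional field, while
 * resp.comp_mask flags which optional fields are valid.  A future field
 * would follow the same pattern (new_field and its mask bit are hypothetical
 * names, not part of this ABI):
 *
 *        if (uhw->outlen >= resp.response_length + sizeof(resp.new_field)) {
 *                resp.response_length += sizeof(resp.new_field);
 *                resp.comp_mask |= QUERY_DEVICE_RESP_MASK_NEW_FIELD;
 *                resp.new_field = ...;
 *        }
 */
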
507 static enum rdma_link_layer
508 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
509 {
510         struct mlx4_dev *dev = to_mdev(device)->dev;
511
512         return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
513                 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
514 }
515
516 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
517                               struct ib_port_attr *props, int netw_view)
518 {
519         struct ib_smp *in_mad  = NULL;
520         struct ib_smp *out_mad = NULL;
521         int ext_active_speed;
522         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
523         int err = -ENOMEM;
524
525         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
526         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
527         if (!in_mad || !out_mad)
528                 goto out;
529
530         init_query_mad(in_mad);
531         in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
532         in_mad->attr_mod = cpu_to_be32(port);
533
534         if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
535                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
536
537         err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
538                                 in_mad, out_mad);
539         if (err)
540                 goto out;
541
542
543         props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
544         props->lmc              = out_mad->data[34] & 0x7;
545         props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
546         props->sm_sl            = out_mad->data[36] & 0xf;
547         props->state            = out_mad->data[32] & 0xf;
548         props->phys_state       = out_mad->data[33] >> 4;
549         props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
550         if (netw_view)
551                 props->gid_tbl_len = out_mad->data[50];
552         else
553                 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
554         props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
555         props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
556         props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
557         props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
558         props->active_width     = out_mad->data[31] & 0xf;
559         props->active_speed     = out_mad->data[35] >> 4;
560         props->max_mtu          = out_mad->data[41] & 0xf;
561         props->active_mtu       = out_mad->data[36] >> 4;
562         props->subnet_timeout   = out_mad->data[51] & 0x1f;
563         props->max_vl_num       = out_mad->data[37] >> 4;
564         props->init_type_reply  = out_mad->data[41] >> 4;
565
566         /* Check if extended speeds (EDR/FDR/...) are supported */
567         if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
568                 ext_active_speed = out_mad->data[62] >> 4;
569
570                 switch (ext_active_speed) {
571                 case 1:
572                         props->active_speed = IB_SPEED_FDR;
573                         break;
574                 case 2:
575                         props->active_speed = IB_SPEED_EDR;
576                         break;
577                 }
578         }
579
580         /* If reported active speed is QDR, check if is FDR-10 */
581         if (props->active_speed == IB_SPEED_QDR) {
582                 init_query_mad(in_mad);
583                 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
584                 in_mad->attr_mod = cpu_to_be32(port);
585
586                 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
587                                    NULL, NULL, in_mad, out_mad);
588                 if (err)
589                         goto out;
590
591                 /* Checking LinkSpeedActive for FDR-10 */
592                 if (out_mad->data[15] & 0x1)
593                         props->active_speed = IB_SPEED_FDR10;
594         }
595
596         /* Avoid wrong speed value returned by FW if the IB link is down. */
597         if (props->state == IB_PORT_DOWN)
598                 props->active_speed = IB_SPEED_SDR;
599
600 out:
601         kfree(in_mad);
602         kfree(out_mad);
603         return err;
604 }
605
606 static u8 state_to_phys_state(enum ib_port_state state)
607 {
608         return state == IB_PORT_ACTIVE ? 5 : 3;
609 }
610
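/*
 * The values above are PortInfo PortPhysicalState encodings: 5 is LinkUp and
 * 3 is Disabled.  RoCE ports have no native IB physical state, so it is
 * synthesized from the logical port state.
 */
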
611 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
612                                struct ib_port_attr *props, int netw_view)
613 {
614
615         struct mlx4_ib_dev *mdev = to_mdev(ibdev);
616         struct mlx4_ib_iboe *iboe = &mdev->iboe;
617         struct net_device *ndev;
618         enum ib_mtu tmp;
619         struct mlx4_cmd_mailbox *mailbox;
620         int err = 0;
621         int is_bonded = mlx4_is_bonded(mdev->dev);
622
623         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
624         if (IS_ERR(mailbox))
625                 return PTR_ERR(mailbox);
626
627         err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
628                            MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
629                            MLX4_CMD_WRAPPED);
630         if (err)
631                 goto out;
632
633         props->active_width     =  (((u8 *)mailbox->buf)[5] == 0x40) ||
634                                    (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
635                                            IB_WIDTH_4X : IB_WIDTH_1X;
636         props->active_speed     =  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
637                                            IB_SPEED_FDR : IB_SPEED_QDR;
638         props->port_cap_flags   = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
639         props->gid_tbl_len      = mdev->dev->caps.gid_table_len[port];
640         props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
641         props->pkey_tbl_len     = 1;
642         props->max_mtu          = IB_MTU_4096;
643         props->max_vl_num       = 2;
644         props->state            = IB_PORT_DOWN;
645         props->phys_state       = state_to_phys_state(props->state);
646         props->active_mtu       = IB_MTU_256;
647         spin_lock_bh(&iboe->lock);
648         ndev = iboe->netdevs[port - 1];
649         if (ndev && is_bonded) {
650                 rcu_read_lock(); /* required to get upper dev */
651                 ndev = netdev_master_upper_dev_get_rcu(ndev);
652                 rcu_read_unlock();
653         }
654         if (!ndev)
655                 goto out_unlock;
656
657         tmp = iboe_get_mtu(ndev->mtu);
658         props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
659
660         props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
661                                         IB_PORT_ACTIVE : IB_PORT_DOWN;
662         props->phys_state       = state_to_phys_state(props->state);
663 out_unlock:
664         spin_unlock_bh(&iboe->lock);
665 out:
666         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
667         return err;
668 }
669
670 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
671                          struct ib_port_attr *props, int netw_view)
672 {
673         int err;
674
675         memset(props, 0, sizeof *props);
676
677         err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
678                 ib_link_query_port(ibdev, port, props, netw_view) :
679                                 eth_link_query_port(ibdev, port, props, netw_view);
680
681         return err;
682 }
683
684 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
685                               struct ib_port_attr *props)
686 {
687         /* returns host view */
688         return __mlx4_ib_query_port(ibdev, port, props, 0);
689 }
690
691 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
692                         union ib_gid *gid, int netw_view)
693 {
694         struct ib_smp *in_mad  = NULL;
695         struct ib_smp *out_mad = NULL;
696         int err = -ENOMEM;
697         struct mlx4_ib_dev *dev = to_mdev(ibdev);
698         int clear = 0;
699         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
700
701         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
702         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
703         if (!in_mad || !out_mad)
704                 goto out;
705
706         init_query_mad(in_mad);
707         in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
708         in_mad->attr_mod = cpu_to_be32(port);
709
710         if (mlx4_is_mfunc(dev->dev) && netw_view)
711                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
712
713         err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
714         if (err)
715                 goto out;
716
717         memcpy(gid->raw, out_mad->data + 8, 8);
718
719         if (mlx4_is_mfunc(dev->dev) && !netw_view) {
720                 if (index) {
721                         /* For any index > 0, return the null guid */
722                         err = 0;
723                         clear = 1;
724                         goto out;
725                 }
726         }
727
728         init_query_mad(in_mad);
729         in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
730         in_mad->attr_mod = cpu_to_be32(index / 8);
731
732         err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
733                            NULL, NULL, in_mad, out_mad);
734         if (err)
735                 goto out;
736
737         memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
738
739 out:
740         if (clear)
741                 memset(gid->raw + 8, 0, 8);
742         kfree(in_mad);
743         kfree(out_mad);
744         return err;
745 }
746
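/*
 * A port GID is assembled above from two MADs: bytes 8..15 of the PortInfo
 * response supply the subnet prefix and the GuidInfo block
 * (attr_mod = index / 8) supplies the per-index GUID.  In the host view of a
 * multi-function (SR-IOV) device only index 0 is real; any other index
 * returns the null GUID.
 */
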
747 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
748                              union ib_gid *gid)
749 {
750         int ret;
751
752         if (rdma_protocol_ib(ibdev, port))
753                 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
754
755         if (!rdma_protocol_roce(ibdev, port))
756                 return -ENODEV;
757
758         if (!rdma_cap_roce_gid_table(ibdev, port))
759                 return -ENODEV;
760
761         ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
762         if (ret == -EAGAIN) {
763                 memcpy(gid, &zgid, sizeof(*gid));
764                 return 0;
765         }
766
767         return ret;
768 }
769
770 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
771                          u16 *pkey, int netw_view)
772 {
773         struct ib_smp *in_mad  = NULL;
774         struct ib_smp *out_mad = NULL;
775         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
776         int err = -ENOMEM;
777
778         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
779         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
780         if (!in_mad || !out_mad)
781                 goto out;
782
783         init_query_mad(in_mad);
784         in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
785         in_mad->attr_mod = cpu_to_be32(index / 32);
786
787         if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
788                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
789
790         err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
791                            in_mad, out_mad);
792         if (err)
793                 goto out;
794
795         *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
796
797 out:
798         kfree(in_mad);
799         kfree(out_mad);
800         return err;
801 }
802
803 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
804 {
805         return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
806 }
807
808 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
809                                  struct ib_device_modify *props)
810 {
811         struct mlx4_cmd_mailbox *mailbox;
812         unsigned long flags;
813
814         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
815                 return -EOPNOTSUPP;
816
817         if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
818                 return 0;
819
820         if (mlx4_is_slave(to_mdev(ibdev)->dev))
821                 return -EOPNOTSUPP;
822
823         spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
824         memcpy(ibdev->node_desc, props->node_desc, 64);
825         spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
826
827         /*
828          * If possible, pass the node desc to FW, so it can generate a
829          * trap 144.  If the cmd fails, just ignore it.
830          */
831         mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
832         if (IS_ERR(mailbox))
833                 return 0;
834
835         memcpy(mailbox->buf, props->node_desc, 64);
836         mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
837                  MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
838
839         mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
840
841         return 0;
842 }
843
844 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
845                             u32 cap_mask)
846 {
847         struct mlx4_cmd_mailbox *mailbox;
848         int err;
849
850         mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
851         if (IS_ERR(mailbox))
852                 return PTR_ERR(mailbox);
853
854         if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
855                 *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
856                 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
857         } else {
858                 ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
859                 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
860         }
861
862         err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
863                        MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
864                        MLX4_CMD_WRAPPED);
865
866         mlx4_free_cmd_mailbox(dev->dev, mailbox);
867         return err;
868 }
869
870 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
871                                struct ib_port_modify *props)
872 {
873         struct mlx4_ib_dev *mdev = to_mdev(ibdev);
874         u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
875         struct ib_port_attr attr;
876         u32 cap_mask;
877         int err;
878
879         /* return OK if this is RoCE. CM calls ib_modify_port() regardless
880          * of whether port link layer is ETH or IB. For ETH ports, qkey
881          * violations and port capabilities are not meaningful.
882          */
883         if (is_eth)
884                 return 0;
885
886         mutex_lock(&mdev->cap_mask_mutex);
887
888         err = mlx4_ib_query_port(ibdev, port, &attr);
889         if (err)
890                 goto out;
891
892         cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
893                 ~props->clr_port_cap_mask;
894
895         err = mlx4_ib_SET_PORT(mdev, port,
896                                !!(mask & IB_PORT_RESET_QKEY_CNTR),
897                                cap_mask);
898
899 out:
900         mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
901         return err;
902 }
903
904 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
905                                                   struct ib_udata *udata)
906 {
907         struct mlx4_ib_dev *dev = to_mdev(ibdev);
908         struct mlx4_ib_ucontext *context;
909         struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
910         struct mlx4_ib_alloc_ucontext_resp resp;
911         int err;
912
913         if (!dev->ib_active)
914                 return ERR_PTR(-EAGAIN);
915
916         if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
917                 resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
918                 resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
919                 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
920         } else {
921                 resp.dev_caps         = dev->dev->caps.userspace_caps;
922                 resp.qp_tab_size      = dev->dev->caps.num_qps;
923                 resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
924                 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
925                 resp.cqe_size         = dev->dev->caps.cqe_size;
926         }
927
928         context = kzalloc(sizeof(*context), GFP_KERNEL);
929         if (!context)
930                 return ERR_PTR(-ENOMEM);
931
932         err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
933         if (err) {
934                 kfree(context);
935                 return ERR_PTR(err);
936         }
937
938         INIT_LIST_HEAD(&context->db_page_list);
939         mutex_init(&context->db_page_mutex);
940
941         if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
942                 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
943         else
944                 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
945
946         if (err) {
947                 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
948                 kfree(context);
949                 return ERR_PTR(-EFAULT);
950         }
951
952         return &context->ibucontext;
953 }
954
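/*
 * The ucontext response above comes in two ABI flavours: userspace built
 * against MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION gets the older resp_v3
 * layout (no dev_caps or cqe_size fields), newer userspace gets the full
 * mlx4_ib_alloc_ucontext_resp.  Both carry the sizes userspace needs to lay
 * out its doorbell and BlueFlame mappings.
 */
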
955 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
956 {
957         struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
958
959         mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
960         kfree(context);
961
962         return 0;
963 }
964
965 static void  mlx4_ib_vma_open(struct vm_area_struct *area)
966 {
967         /* vma_open is called when a new VMA is created on top of our VMA.
968          * This is done through either mremap flow or split_vma (usually due
969          * to mlock, madvise, munmap, etc.). We do not support a clone of the
970          * vma, as this VMA is strongly hardware related. Therefore we set the
971          * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
972          * calling us again and trying to do incorrect actions. We assume that
973          * the original vma size is exactly a single page, and that there
974          * will be no "splitting" operations on it.
975          */
976         area->vm_ops = NULL;
977 }
978
979 static void  mlx4_ib_vma_close(struct vm_area_struct *area)
980 {
981         struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
982
983         /* It's guaranteed that all VMAs opened on a FD are closed before the
984          * file itself is closed, therefore no sync is needed with the regular
985          * closing flow (e.g. mlx4_ib_dealloc_ucontext).  However, a sync is
986          * needed with accessing the vma as part of mlx4_ib_disassociate_ucontext.
987          * The close operation is usually called under mm->mmap_sem, except when
988          * the process is exiting.  The exiting case is handled explicitly as part
989          * of mlx4_ib_disassociate_ucontext.
990          */
991         mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
992                                 area->vm_private_data;
993
994         /* set the vma context pointer to null in the mlx4_ib driver's private
995          * data to protect against a race condition in mlx4_ib_disassociate_ucontext().
996          */
997         mlx4_ib_vma_priv_data->vma = NULL;
998 }
999
1000 static const struct vm_operations_struct mlx4_ib_vm_ops = {
1001         .open = mlx4_ib_vma_open,
1002         .close = mlx4_ib_vma_close
1003 };
1004
1005 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1006 {
1007         int i;
1008         int ret = 0;
1009         struct vm_area_struct *vma;
1010         struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1011         struct task_struct *owning_process  = NULL;
1012         struct mm_struct   *owning_mm       = NULL;
1013
1014         owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
1015         if (!owning_process)
1016                 return;
1017
1018         owning_mm = get_task_mm(owning_process);
1019         if (!owning_mm) {
1020                 pr_info("no mm, disassociate ucontext is pending task termination\n");
1021                 while (1) {
1022                         /* make sure the task is dead before returning; this
1023                          * prevents a rare case of the module going down in
1024                          * parallel with a call to mlx4_ib_vma_close.
1025                          */
1026                         put_task_struct(owning_process);
1027                         msleep(1);
1028                         owning_process = get_pid_task(ibcontext->tgid,
1029                                                       PIDTYPE_PID);
1030                         if (!owning_process ||
1031                             owning_process->state == TASK_DEAD) {
1032                                 pr_info("disassociate ucontext done, task was terminated\n");
1033                                 /* in case task was dead need to release the task struct */
1034                                 if (owning_process)
1035                                         put_task_struct(owning_process);
1036                                 return;
1037                         }
1038                 }
1039         }
1040
1041         /* need to protect from a race on closing the vma as part of
1042          * mlx4_ib_vma_close().
1043          */
1044         down_write(&owning_mm->mmap_sem);
1045         if (!mmget_still_valid(owning_mm))
1046                 goto skip_mm;
1047         for (i = 0; i < HW_BAR_COUNT; i++) {
1048                 vma = context->hw_bar_info[i].vma;
1049                 if (!vma)
1050                         continue;
1051
1052                 ret = zap_vma_ptes(context->hw_bar_info[i].vma,
1053                                    context->hw_bar_info[i].vma->vm_start,
1054                                    PAGE_SIZE);
1055                 if (ret) {
1056                         pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
1057                         BUG_ON(1);
1058                 }
1059
1060                 context->hw_bar_info[i].vma->vm_flags &=
1061                         ~(VM_SHARED | VM_MAYSHARE);
1062                 /* context is going to be destroyed, should not access ops any more */
1063                 context->hw_bar_info[i].vma->vm_ops = NULL;
1064         }
1065
1066 skip_mm:
1067         up_write(&owning_mm->mmap_sem);
1068         mmput(owning_mm);
1069         put_task_struct(owning_process);
1070 }
1071
1072 static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
1073                                  struct mlx4_ib_vma_private_data *vma_private_data)
1074 {
1075         vma_private_data->vma = vma;
1076         vma->vm_private_data = vma_private_data;
1077         vma->vm_ops =  &mlx4_ib_vm_ops;
1078 }
1079
1080 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1081 {
1082         struct mlx4_ib_dev *dev = to_mdev(context->device);
1083         struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
1084
1085         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1086                 return -EINVAL;
1087
1088         if (vma->vm_pgoff == 0) {
1089                 /* We prevent double mmapping on the same context */
1090                 if (mucontext->hw_bar_info[HW_BAR_DB].vma)
1091                         return -EINVAL;
1092
1093                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1094
1095                 if (io_remap_pfn_range(vma, vma->vm_start,
1096                                        to_mucontext(context)->uar.pfn,
1097                                        PAGE_SIZE, vma->vm_page_prot))
1098                         return -EAGAIN;
1099
1100                 mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
1101
1102         } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
1103                 /* We prevent double mmapping on the same context */
1104                 if (mucontext->hw_bar_info[HW_BAR_BF].vma)
1105                         return -EINVAL;
1106
1107                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1108
1109                 if (io_remap_pfn_range(vma, vma->vm_start,
1110                                        to_mucontext(context)->uar.pfn +
1111                                        dev->dev->caps.num_uars,
1112                                        PAGE_SIZE, vma->vm_page_prot))
1113                         return -EAGAIN;
1114
1115                 mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
1116
1117         } else if (vma->vm_pgoff == 3) {
1118                 struct mlx4_clock_params params;
1119                 int ret;
1120
1121                 /* We prevent double mmapping on the same context */
1122                 if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
1123                         return -EINVAL;
1124
1125                 ret = mlx4_get_internal_clock_params(dev->dev, &params);
1126
1127                 if (ret)
1128                         return ret;
1129
1130                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1131                 if (io_remap_pfn_range(vma, vma->vm_start,
1132                                        (pci_resource_start(dev->dev->persist->pdev,
1133                                                            params.bar) +
1134                                         params.offset)
1135                                        >> PAGE_SHIFT,
1136                                        PAGE_SIZE, vma->vm_page_prot))
1137                         return -EAGAIN;
1138
1139                 mlx4_ib_set_vma_data(vma,
1140                                      &mucontext->hw_bar_info[HW_BAR_CLOCK]);
1141         } else {
1142                 return -EINVAL;
1143         }
1144
1145         return 0;
1146 }
1147
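/*
 * The mmap offsets handled above select, per page, what gets mapped:
 * pgoff 0 is the UAR doorbell page (non-cached), pgoff 1 is the BlueFlame
 * page (write-combining, only when bf_reg_size != 0) and pgoff 3 is the HCA
 * core clock page.  A hypothetical userspace sketch, with offsets in units
 * of the page size:
 *
 *        db  = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 * page_size);
 *        bf  = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 1 * page_size);
 *        clk = mmap(NULL, page_size, PROT_READ,              MAP_SHARED, fd, 3 * page_size);
 */
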
1148 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
1149                                       struct ib_ucontext *context,
1150                                       struct ib_udata *udata)
1151 {
1152         struct mlx4_ib_pd *pd;
1153         int err;
1154
1155         pd = kmalloc(sizeof *pd, GFP_KERNEL);
1156         if (!pd)
1157                 return ERR_PTR(-ENOMEM);
1158
1159         err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1160         if (err) {
1161                 kfree(pd);
1162                 return ERR_PTR(err);
1163         }
1164
1165         if (context)
1166                 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
1167                         mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1168                         kfree(pd);
1169                         return ERR_PTR(-EFAULT);
1170                 }
1171
1172         return &pd->ibpd;
1173 }
1174
1175 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
1176 {
1177         mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1178         kfree(pd);
1179
1180         return 0;
1181 }
1182
1183 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
1184                                           struct ib_ucontext *context,
1185                                           struct ib_udata *udata)
1186 {
1187         struct mlx4_ib_xrcd *xrcd;
1188         struct ib_cq_init_attr cq_attr = {};
1189         int err;
1190
1191         if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1192                 return ERR_PTR(-ENOSYS);
1193
1194         xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
1195         if (!xrcd)
1196                 return ERR_PTR(-ENOMEM);
1197
1198         err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
1199         if (err)
1200                 goto err1;
1201
1202         xrcd->pd = ib_alloc_pd(ibdev);
1203         if (IS_ERR(xrcd->pd)) {
1204                 err = PTR_ERR(xrcd->pd);
1205                 goto err2;
1206         }
1207
1208         cq_attr.cqe = 1;
1209         xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
1210         if (IS_ERR(xrcd->cq)) {
1211                 err = PTR_ERR(xrcd->cq);
1212                 goto err3;
1213         }
1214
1215         return &xrcd->ibxrcd;
1216
1217 err3:
1218         ib_dealloc_pd(xrcd->pd);
1219 err2:
1220         mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
1221 err1:
1222         kfree(xrcd);
1223         return ERR_PTR(err);
1224 }
1225
1226 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1227 {
1228         ib_destroy_cq(to_mxrcd(xrcd)->cq);
1229         ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1230         mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1231         kfree(xrcd);
1232
1233         return 0;
1234 }
1235
1236 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1237 {
1238         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1239         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1240         struct mlx4_ib_gid_entry *ge;
1241
1242         ge = kzalloc(sizeof *ge, GFP_KERNEL);
1243         if (!ge)
1244                 return -ENOMEM;
1245
1246         ge->gid = *gid;
1247         if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1248                 ge->port = mqp->port;
1249                 ge->added = 1;
1250         }
1251
1252         mutex_lock(&mqp->mutex);
1253         list_add_tail(&ge->list, &mqp->gid_list);
1254         mutex_unlock(&mqp->mutex);
1255
1256         return 0;
1257 }
1258
1259 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1260                                           struct mlx4_ib_counters *ctr_table)
1261 {
1262         struct counter_index *counter, *tmp_count;
1263
1264         mutex_lock(&ctr_table->mutex);
1265         list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1266                                  list) {
1267                 if (counter->allocated)
1268                         mlx4_counter_free(ibdev->dev, counter->index);
1269                 list_del(&counter->list);
1270                 kfree(counter);
1271         }
1272         mutex_unlock(&ctr_table->mutex);
1273 }
1274
1275 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1276                    union ib_gid *gid)
1277 {
1278         struct net_device *ndev;
1279         int ret = 0;
1280
1281         if (!mqp->port)
1282                 return 0;
1283
1284         spin_lock_bh(&mdev->iboe.lock);
1285         ndev = mdev->iboe.netdevs[mqp->port - 1];
1286         if (ndev)
1287                 dev_hold(ndev);
1288         spin_unlock_bh(&mdev->iboe.lock);
1289
1290         if (ndev) {
1291                 ret = 1;
1292                 dev_put(ndev);
1293         }
1294
1295         return ret;
1296 }
1297
1298 struct mlx4_ib_steering {
1299         struct list_head list;
1300         struct mlx4_flow_reg_id reg_id;
1301         union ib_gid gid;
1302 };
1303
1304 static int parse_flow_attr(struct mlx4_dev *dev,
1305                            u32 qp_num,
1306                            union ib_flow_spec *ib_spec,
1307                            struct _rule_hw *mlx4_spec)
1308 {
1309         enum mlx4_net_trans_rule_id type;
1310
1311         switch (ib_spec->type) {
1312         case IB_FLOW_SPEC_ETH:
1313                 type = MLX4_NET_TRANS_RULE_ID_ETH;
1314                 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1315                        ETH_ALEN);
1316                 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1317                        ETH_ALEN);
1318                 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1319                 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1320                 break;
1321         case IB_FLOW_SPEC_IB:
1322                 type = MLX4_NET_TRANS_RULE_ID_IB;
1323                 mlx4_spec->ib.l3_qpn =
1324                         cpu_to_be32(qp_num);
1325                 mlx4_spec->ib.qpn_mask =
1326                         cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1327                 break;
1328
1329
1330         case IB_FLOW_SPEC_IPV4:
1331                 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1332                 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1333                 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1334                 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1335                 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1336                 break;
1337
1338         case IB_FLOW_SPEC_TCP:
1339         case IB_FLOW_SPEC_UDP:
1340                 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1341                                         MLX4_NET_TRANS_RULE_ID_TCP :
1342                                         MLX4_NET_TRANS_RULE_ID_UDP;
1343                 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1344                 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1345                 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1346                 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1347                 break;
1348
1349         default:
1350                 return -EINVAL;
1351         }
1352         if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1353             mlx4_hw_rule_sz(dev, type) < 0)
1354                 return -EINVAL;
1355         mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1356         mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1357         return mlx4_hw_rule_sz(dev, type);
1358 }
1359
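/*
 * parse_flow_attr() converts one union ib_flow_spec into the matching
 * hardware _rule_hw segment: it copies the value/mask pairs, stores the HW
 * steering id, records the segment size in 4-byte units (hence the ">> 2")
 * and returns the size in bytes so the caller can step to the next segment
 * of the rule.
 */
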
1360 struct default_rules {
1361         __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1362         __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1363         __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1364         __u8  link_layer;
1365 };
1366 static const struct default_rules default_table[] = {
1367         {
1368                 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1369                 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1370                 .rules_create_list = {IB_FLOW_SPEC_IB},
1371                 .link_layer = IB_LINK_LAYER_INFINIBAND
1372         }
1373 };
1374
1375 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1376                                          struct ib_flow_attr *flow_attr)
1377 {
1378         int i, j, k;
1379         void *ib_flow;
1380         const struct default_rules *pdefault_rules = default_table;
1381         u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1382
1383         for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1384                 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1385                 memset(&field_types, 0, sizeof(field_types));
1386
1387                 if (link_layer != pdefault_rules->link_layer)
1388                         continue;
1389
1390                 ib_flow = flow_attr + 1;
1391                 /* we assume the specs are sorted */
1392                 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1393                      j < flow_attr->num_of_specs; k++) {
1394                         union ib_flow_spec *current_flow =
1395                                 (union ib_flow_spec *)ib_flow;
1396
1397                         /* same layer but different type */
1398                         if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1399                              (pdefault_rules->mandatory_fields[k] &
1400                               IB_FLOW_SPEC_LAYER_MASK)) &&
1401                             (current_flow->type !=
1402                              pdefault_rules->mandatory_fields[k]))
1403                                 goto out;
1404
1405                         /* same layer, try to match the next one */
1406                         if (current_flow->type ==
1407                             pdefault_rules->mandatory_fields[k]) {
1408                                 j++;
1409                                 ib_flow +=
1410                                         ((union ib_flow_spec *)ib_flow)->size;
1411                         }
1412                 }
1413
1414                 ib_flow = flow_attr + 1;
1415                 for (j = 0; j < flow_attr->num_of_specs;
1416                      j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1417                         for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1418                                 /* same layer and same type */
1419                                 if (((union ib_flow_spec *)ib_flow)->type ==
1420                                     pdefault_rules->mandatory_not_fields[k])
1421                                         goto out;
1422
1423                 return i;
1424         }
1425 out:
1426         return -1;
1427 }
1428
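/*
 * Emit the hardware specs named in pdefault_rules->rules_create_list into
 * mlx4_spec.  Returns the number of bytes written on success, or -EINVAL
 * if a rule cannot be built or parsed.
 */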
1429 static int __mlx4_ib_create_default_rules(
1430                 struct mlx4_ib_dev *mdev,
1431                 struct ib_qp *qp,
1432                 const struct default_rules *pdefault_rules,
1433                 struct _rule_hw *mlx4_spec) {
1434         int size = 0;
1435         int i;
1436
1437         for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1438                 union ib_flow_spec ib_spec = {};
1439                 int ret;
1440
1441                 switch (pdefault_rules->rules_create_list[i]) {
1442                 case 0:
1443                         /* no rule */
1444                         continue;
1445                 case IB_FLOW_SPEC_IB:
1446                         ib_spec.type = IB_FLOW_SPEC_IB;
1447                         ib_spec.size = sizeof(struct ib_flow_spec_ib);
1448
1449                         break;
1450                 default:
1451                         /* invalid rule */
1452                         return -EINVAL;
1453                 }
1454                 /* We must insert an empty rule; the qpn is ignored here */
1455                 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1456                                       mlx4_spec);
1457                 if (ret < 0) {
1458                         pr_info("failed to parse default flow rule\n");
1459                         return -EINVAL;
1460                 }
1461
1462                 mlx4_spec = (void *)mlx4_spec + ret;
1463                 size += ret;
1464         }
1465         return size;
1466 }
1467
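/*
 * Build the hardware steering rule in a command mailbox (control header,
 * any matching default rules, then the user-supplied specs) and attach it
 * with the MLX4_QP_FLOW_STEERING_ATTACH command.  The firmware returns a
 * registration id through @reg_id which is later used to detach the rule.
 */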
1468 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1469                           int domain,
1470                           enum mlx4_net_trans_promisc_mode flow_type,
1471                           u64 *reg_id)
1472 {
1473         int ret, i;
1474         int size = 0;
1475         void *ib_flow;
1476         struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1477         struct mlx4_cmd_mailbox *mailbox;
1478         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1479         int default_flow;
1480
1481         static const u16 __mlx4_domain[] = {
1482                 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1483                 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1484                 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1485                 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1486         };
1487
1488         if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1489                 pr_err("Invalid priority value %d\n", flow_attr->priority);
1490                 return -EINVAL;
1491         }
1492
1493         if (domain >= IB_FLOW_DOMAIN_NUM) {
1494                 pr_err("Invalid domain value %d\n", domain);
1495                 return -EINVAL;
1496         }
1497
1498         if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1499                 return -EINVAL;
1500
1501         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1502         if (IS_ERR(mailbox))
1503                 return PTR_ERR(mailbox);
1504         ctrl = mailbox->buf;
1505
1506         ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1507                                  flow_attr->priority);
1508         ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1509         ctrl->port = flow_attr->port;
1510         ctrl->qpn = cpu_to_be32(qp->qp_num);
1511
1512         ib_flow = flow_attr + 1;
1513         size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1514         /* Add default flows */
1515         default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1516         if (default_flow >= 0) {
1517                 ret = __mlx4_ib_create_default_rules(
1518                                 mdev, qp, default_table + default_flow,
1519                                 mailbox->buf + size);
1520                 if (ret < 0) {
1521                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1522                         return -EINVAL;
1523                 }
1524                 size += ret;
1525         }
1526         for (i = 0; i < flow_attr->num_of_specs; i++) {
1527                 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1528                                       mailbox->buf + size);
1529                 if (ret < 0) {
1530                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1531                         return -EINVAL;
1532                 }
1533                 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1534                 size += ret;
1535         }
1536
1537         ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1538                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1539                            MLX4_CMD_WRAPPED);
1540         if (ret == -ENOMEM)
1541                 pr_err("mcg table is full. Failed to register network rule.\n");
1542         else if (ret == -ENXIO)
1543                 pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1544         else if (ret)
1545                 pr_err("Invalid argument. Failed to register network rule.\n");
1546
1547         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1548         return ret;
1549 }
1550
1551 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1552 {
1553         int err;
1554         err = mlx4_cmd(dev, reg_id, 0, 0,
1555                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1556                        MLX4_CMD_WRAPPED);
1557         if (err)
1558                 pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1559                        reg_id);
1560         return err;
1561 }
1562
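/*
 * For VXLAN tunnel offload, add a tunnel steering rule matching the flow's
 * destination MAC.  This is a no-op unless the flow consists of exactly one
 * ETH spec and the device is in VXLAN offload mode without A0 static DMFS
 * steering.
 */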
1563 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1564                                     u64 *reg_id)
1565 {
1566         void *ib_flow;
1567         union ib_flow_spec *ib_spec;
1568         struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1569         int err = 0;
1570
1571         if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1572             dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1573                 return 0; /* do nothing */
1574
1575         ib_flow = flow_attr + 1;
1576         ib_spec = (union ib_flow_spec *)ib_flow;
1577
1578         if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1579                 return 0; /* do nothing */
1580
1581         err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1582                                     flow_attr->port, qp->qp_num,
1583                                     MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1584                                     reg_id);
1585         return err;
1586 }
1587
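/*
 * ib_create_flow handler.  Map the IB flow attribute type to one or two
 * mlx4 rule types (a sniffer flow needs both a UC and an MC rule), attach
 * each of them, and on a bonded device also attach a mirror rule on the
 * second port.  NORMAL flows may additionally get a VXLAN tunnel steering
 * rule.
 */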
1588 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1589                                     struct ib_flow_attr *flow_attr,
1590                                     int domain)
1591 {
1592         int err = 0, i = 0, j = 0;
1593         struct mlx4_ib_flow *mflow;
1594         enum mlx4_net_trans_promisc_mode type[2];
1595         struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1596         int is_bonded = mlx4_is_bonded(dev);
1597
1598         memset(type, 0, sizeof(type));
1599
1600         mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1601         if (!mflow) {
1602                 err = -ENOMEM;
1603                 goto err_free;
1604         }
1605
1606         switch (flow_attr->type) {
1607         case IB_FLOW_ATTR_NORMAL:
1608                 type[0] = MLX4_FS_REGULAR;
1609                 break;
1610
1611         case IB_FLOW_ATTR_ALL_DEFAULT:
1612                 type[0] = MLX4_FS_ALL_DEFAULT;
1613                 break;
1614
1615         case IB_FLOW_ATTR_MC_DEFAULT:
1616                 type[0] = MLX4_FS_MC_DEFAULT;
1617                 break;
1618
1619         case IB_FLOW_ATTR_SNIFFER:
1620                 type[0] = MLX4_FS_UC_SNIFFER;
1621                 type[1] = MLX4_FS_MC_SNIFFER;
1622                 break;
1623
1624         default:
1625                 err = -EINVAL;
1626                 goto err_free;
1627         }
1628
1629         while (i < ARRAY_SIZE(type) && type[i]) {
1630                 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1631                                             &mflow->reg_id[i].id);
1632                 if (err)
1633                         goto err_create_flow;
1634                 if (is_bonded) {
1635                         /* Application always sees one port so the mirror rule
1636                          * must be on port #2
1637                          */
1638                         flow_attr->port = 2;
1639                         err = __mlx4_ib_create_flow(qp, flow_attr,
1640                                                     domain, type[j],
1641                                                     &mflow->reg_id[j].mirror);
1642                         flow_attr->port = 1;
1643                         if (err)
1644                                 goto err_create_flow;
1645                         j++;
1646                 }
1647
1648                 i++;
1649         }
1650
1651         if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1652                 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1653                                                &mflow->reg_id[i].id);
1654                 if (err)
1655                         goto err_create_flow;
1656
1657                 if (is_bonded) {
1658                         flow_attr->port = 2;
1659                         err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1660                                                        &mflow->reg_id[j].mirror);
1661                         flow_attr->port = 1;
1662                         if (err)
1663                                 goto err_create_flow;
1664                         j++;
1665                 }
1666                 /* function to create mirror rule */
1667                 i++;
1668         }
1669
1670         return &mflow->ibflow;
1671
1672 err_create_flow:
1673         while (i) {
1674                 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1675                                              mflow->reg_id[i].id);
1676                 i--;
1677         }
1678
1679         while (j) {
1680                 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1681                                              mflow->reg_id[j].mirror);
1682                 j--;
1683         }
1684 err_free:
1685         kfree(mflow);
1686         return ERR_PTR(err);
1687 }
1688
1689 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1690 {
1691         int err, ret = 0;
1692         int i = 0;
1693         struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1694         struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1695
1696         while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1697                 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1698                 if (err)
1699                         ret = err;
1700                 if (mflow->reg_id[i].mirror) {
1701                         err = __mlx4_ib_destroy_flow(mdev->dev,
1702                                                      mflow->reg_id[i].mirror);
1703                         if (err)
1704                                 ret = err;
1705                 }
1706                 i++;
1707         }
1708
1709         kfree(mflow);
1710         return ret;
1711 }
1712
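/*
 * Attach a QP to a multicast group.  With device-managed flow steering the
 * returned registration id is saved on mqp->steering_rules so that detach
 * can find it; on a bonded device the group is also attached on the other
 * port and the mirror registration id is recorded as well.
 */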
1713 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1714 {
1715         int err;
1716         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1717         struct mlx4_dev *dev = mdev->dev;
1718         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1719         struct mlx4_ib_steering *ib_steering = NULL;
1720         enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1721         struct mlx4_flow_reg_id reg_id;
1722
1723         if (mdev->dev->caps.steering_mode ==
1724             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1725                 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1726                 if (!ib_steering)
1727                         return -ENOMEM;
1728         }
1729
1730         err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1731                                     !!(mqp->flags &
1732                                        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1733                                     prot, &reg_id.id);
1734         if (err) {
1735                 pr_err("multicast attach op failed, err %d\n", err);
1736                 goto err_malloc;
1737         }
1738
1739         reg_id.mirror = 0;
1740         if (mlx4_is_bonded(dev)) {
1741                 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1742                                             (mqp->port == 1) ? 2 : 1,
1743                                             !!(mqp->flags &
1744                                             MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1745                                             prot, &reg_id.mirror);
1746                 if (err)
1747                         goto err_add;
1748         }
1749
1750         err = add_gid_entry(ibqp, gid);
1751         if (err)
1752                 goto err_add;
1753
1754         if (ib_steering) {
1755                 memcpy(ib_steering->gid.raw, gid->raw, 16);
1756                 ib_steering->reg_id = reg_id;
1757                 mutex_lock(&mqp->mutex);
1758                 list_add(&ib_steering->list, &mqp->steering_rules);
1759                 mutex_unlock(&mqp->mutex);
1760         }
1761         return 0;
1762
1763 err_add:
1764         mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1765                               prot, reg_id.id);
1766         if (reg_id.mirror)
1767                 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1768                                       prot, reg_id.mirror);
1769 err_malloc:
1770         kfree(ib_steering);
1771
1772         return err;
1773 }
1774
1775 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1776 {
1777         struct mlx4_ib_gid_entry *ge;
1778         struct mlx4_ib_gid_entry *tmp;
1779         struct mlx4_ib_gid_entry *ret = NULL;
1780
1781         list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1782                 if (!memcmp(raw, ge->gid.raw, 16)) {
1783                         ret = ge;
1784                         break;
1785                 }
1786         }
1787
1788         return ret;
1789 }
1790
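/*
 * Detach a QP from a multicast group: recover the registration id saved at
 * attach time (device-managed steering only), detach the rule (and its
 * bonded mirror), and drop the corresponding entry from the QP's gid list.
 */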
1791 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1792 {
1793         int err;
1794         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1795         struct mlx4_dev *dev = mdev->dev;
1796         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1797         struct net_device *ndev;
1798         struct mlx4_ib_gid_entry *ge;
1799         struct mlx4_flow_reg_id reg_id = {0, 0};
1800         enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
1801
1802         if (mdev->dev->caps.steering_mode ==
1803             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1804                 struct mlx4_ib_steering *ib_steering;
1805
1806                 mutex_lock(&mqp->mutex);
1807                 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1808                         if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1809                                 list_del(&ib_steering->list);
1810                                 break;
1811                         }
1812                 }
1813                 mutex_unlock(&mqp->mutex);
1814                 if (&ib_steering->list == &mqp->steering_rules) {
1815                         pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1816                         return -EINVAL;
1817                 }
1818                 reg_id = ib_steering->reg_id;
1819                 kfree(ib_steering);
1820         }
1821
1822         err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1823                                     prot, reg_id.id);
1824         if (err)
1825                 return err;
1826
1827         if (mlx4_is_bonded(dev)) {
1828                 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1829                                             prot, reg_id.mirror);
1830                 if (err)
1831                         return err;
1832         }
1833
1834         mutex_lock(&mqp->mutex);
1835         ge = find_gid_entry(mqp, gid->raw);
1836         if (ge) {
1837                 spin_lock_bh(&mdev->iboe.lock);
1838                 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1839                 if (ndev)
1840                         dev_hold(ndev);
1841                 spin_unlock_bh(&mdev->iboe.lock);
1842                 if (ndev)
1843                         dev_put(ndev);
1844                 list_del(&ge->list);
1845                 kfree(ge);
1846         } else
1847                 pr_warn("could not find mgid entry\n");
1848
1849         mutex_unlock(&mqp->mutex);
1850
1851         return 0;
1852 }
1853
1854 static int init_node_data(struct mlx4_ib_dev *dev)
1855 {
1856         struct ib_smp *in_mad  = NULL;
1857         struct ib_smp *out_mad = NULL;
1858         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1859         int err = -ENOMEM;
1860
1861         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1862         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1863         if (!in_mad || !out_mad)
1864                 goto out;
1865
1866         init_query_mad(in_mad);
1867         in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1868         if (mlx4_is_master(dev->dev))
1869                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1870
1871         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1872         if (err)
1873                 goto out;
1874
1875         memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1876
1877         in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1878
1879         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1880         if (err)
1881                 goto out;
1882
1883         dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1884         memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1885
1886 out:
1887         kfree(in_mad);
1888         kfree(out_mad);
1889         return err;
1890 }
1891
1892 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1893                         char *buf)
1894 {
1895         struct mlx4_ib_dev *dev =
1896                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1897         return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
1898 }
1899
1900 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1901                            char *buf)
1902 {
1903         struct mlx4_ib_dev *dev =
1904                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1905         return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1906                        (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1907                        (int) dev->dev->caps.fw_ver & 0xffff);
1908 }
1909
1910 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1911                         char *buf)
1912 {
1913         struct mlx4_ib_dev *dev =
1914                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1915         return sprintf(buf, "%x\n", dev->dev->rev_id);
1916 }
1917
1918 static ssize_t show_board(struct device *device, struct device_attribute *attr,
1919                           char *buf)
1920 {
1921         struct mlx4_ib_dev *dev =
1922                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1923         return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1924                        dev->dev->board_id);
1925 }
1926
1927 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1928 static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1929 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1930 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1931
1932 static struct device_attribute *mlx4_class_attributes[] = {
1933         &dev_attr_hw_rev,
1934         &dev_attr_fw_ver,
1935         &dev_attr_hca_type,
1936         &dev_attr_board_id
1937 };
1938
1939 #define MLX4_IB_INVALID_MAC     ((u64)-1)
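/*
 * Cache the netdev's current MAC in iboe.mac[] and, on multi-function
 * (SRIOV) devices, move the port's proxy QP1 to the new source MAC:
 * register the new MAC, switch the QP with MLX4_UPDATE_QP_SMAC, and
 * unregister whichever MAC is no longer needed.
 */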
1940 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1941                                struct net_device *dev,
1942                                int port)
1943 {
1944         u64 new_smac = 0;
1945         u64 release_mac = MLX4_IB_INVALID_MAC;
1946         struct mlx4_ib_qp *qp;
1947
1948         read_lock(&dev_base_lock);
1949         new_smac = mlx4_mac_to_u64(dev->dev_addr);
1950         read_unlock(&dev_base_lock);
1951
1952         atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
1953
1954         /* no need to update QP1 or register a MAC in non-SRIOV mode */
1955         if (!mlx4_is_mfunc(ibdev->dev))
1956                 return;
1957
1958         mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1959         qp = ibdev->qp1_proxy[port - 1];
1960         if (qp) {
1961                 int new_smac_index;
1962                 u64 old_smac;
1963                 struct mlx4_update_qp_params update_params;
1964
1965                 mutex_lock(&qp->mutex);
1966                 old_smac = qp->pri.smac;
1967                 if (new_smac == old_smac)
1968                         goto unlock;
1969
1970                 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1971
1972                 if (new_smac_index < 0)
1973                         goto unlock;
1974
1975                 update_params.smac_index = new_smac_index;
1976                 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
1977                                    &update_params)) {
1978                         release_mac = new_smac;
1979                         goto unlock;
1980                 }
1981                 /* if the old port was zero, no MAC was registered for this QP yet */
1982                 if (qp->pri.smac_port)
1983                         release_mac = old_smac;
1984                 qp->pri.smac = new_smac;
1985                 qp->pri.smac_port = port;
1986                 qp->pri.smac_index = new_smac_index;
1987         }
1988
1989 unlock:
1990         if (release_mac != MLX4_IB_INVALID_MAC)
1991                 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1992         if (qp)
1993                 mutex_unlock(&qp->mutex);
1994         mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1995 }
1996
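/*
 * Called under RTNL from the netdev notifier: refresh iboe->netdevs[] for
 * every IB-transport port and, when the event concerns one of those
 * netdevs, run mlx4_ib_update_qps() for the affected port.
 */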
1997 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1998                                  struct net_device *dev,
1999                                  unsigned long event)
2000
2001 {
2002         struct mlx4_ib_iboe *iboe;
2003         int update_qps_port = -1;
2004         int port;
2005
2006         ASSERT_RTNL();
2007
2008         iboe = &ibdev->iboe;
2009
2010         spin_lock_bh(&iboe->lock);
2011         mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2012
2013                 iboe->netdevs[port - 1] =
2014                         mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2015
2016                 if (dev == iboe->netdevs[port - 1] &&
2017                     (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2018                      event == NETDEV_UP || event == NETDEV_CHANGE))
2019                         update_qps_port = port;
2020
2021         }
2022         spin_unlock_bh(&iboe->lock);
2023
2024         if (update_qps_port > 0)
2025                 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2026 }
2027
2028 static int mlx4_ib_netdev_event(struct notifier_block *this,
2029                                 unsigned long event, void *ptr)
2030 {
2031         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2032         struct mlx4_ib_dev *ibdev;
2033
2034         if (!net_eq(dev_net(dev), &init_net))
2035                 return NOTIFY_DONE;
2036
2037         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2038         mlx4_ib_scan_netdevs(ibdev, dev, event);
2039
2040         return NOTIFY_DONE;
2041 }
2042
2043 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2044 {
2045         int port;
2046         int slave;
2047         int i;
2048
2049         if (mlx4_is_master(ibdev->dev)) {
2050                 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2051                      ++slave) {
2052                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2053                                 for (i = 0;
2054                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2055                                      ++i) {
2056                                         ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2057                                         /* master has the identity virt2phys pkey mapping */
2058                                                 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2059                                                         ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2060                                         mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2061                                                              ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2062                                 }
2063                         }
2064                 }
2065                 /* initialize pkey cache */
2066                 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2067                         for (i = 0;
2068                              i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2069                              ++i)
2070                                 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2071                                         (i) ? 0 : 0xFFFF;
2072                 }
2073         }
2074 }
2075
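/*
 * Ask the core driver for dedicated completion EQs for each port (EQs
 * shared between ports are requested only once), record the assigned
 * vectors in ibdev->eq_table, and advertise the number actually obtained
 * through num_comp_vectors.
 */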
2076 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2077 {
2078         int i, j, eq = 0, total_eqs = 0;
2079
2080         ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2081                                   sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2082         if (!ibdev->eq_table)
2083                 return;
2084
2085         for (i = 1; i <= dev->caps.num_ports; i++) {
2086                 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2087                      j++, total_eqs++) {
2088                         if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
2089                                 continue;
2090                         ibdev->eq_table[eq] = total_eqs;
2091                         if (!mlx4_assign_eq(dev, i,
2092                                             &ibdev->eq_table[eq]))
2093                                 eq++;
2094                         else
2095                                 ibdev->eq_table[eq] = -1;
2096                 }
2097         }
2098
2099         for (i = eq; i < dev->caps.num_comp_vectors;
2100              ibdev->eq_table[i++] = -1)
2101                 ;
2102
2103         /* Advertise the new number of EQs to clients */
2104         ibdev->ib_dev.num_comp_vectors = eq;
2105 }
2106
2107 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2108 {
2109         int i;
2110         int total_eqs = ibdev->ib_dev.num_comp_vectors;
2111
2112         /* no eqs were allocated */
2113         if (!ibdev->eq_table)
2114                 return;
2115
2116         /* Reset the advertised EQ number */
2117         ibdev->ib_dev.num_comp_vectors = 0;
2118
2119         for (i = 0; i < total_eqs; i++)
2120                 mlx4_release_eq(dev, ibdev->eq_table[i]);
2121
2122         kfree(ibdev->eq_table);
2123         ibdev->eq_table = NULL;
2124 }
2125
2126 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2127                                struct ib_port_immutable *immutable)
2128 {
2129         struct ib_port_attr attr;
2130         int err;
2131
2132         err = mlx4_ib_query_port(ibdev, port_num, &attr);
2133         if (err)
2134                 return err;
2135
2136         immutable->pkey_tbl_len = attr.pkey_tbl_len;
2137         immutable->gid_tbl_len = attr.gid_tbl_len;
2138
2139         if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND)
2140                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2141         else
2142                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2143
2144         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2145
2146         return 0;
2147 }
2148
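/*
 * Main mlx4 interface "add" callback: allocate and populate the IB device
 * (PD, UAR, verbs ops, uverbs command masks), set up per-port counters and
 * EQs, reserve a QPN range for device-managed steering, register with the
 * IB core, initialize MAD/SRIOV support and, for RoCE, hook the netdevice
 * notifier.
 */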
2149 static void *mlx4_ib_add(struct mlx4_dev *dev)
2150 {
2151         struct mlx4_ib_dev *ibdev;
2152         int num_ports = 0;
2153         int i, j;
2154         int err;
2155         struct mlx4_ib_iboe *iboe;
2156         int ib_num_ports = 0;
2157         int num_req_counters;
2158         int allocated;
2159         u32 counter_index;
2160         struct counter_index *new_counter_index = NULL;
2161
2162         pr_info_once("%s", mlx4_ib_version);
2163
2164         num_ports = 0;
2165         mlx4_foreach_ib_transport_port(i, dev)
2166                 num_ports++;
2167
2168         /* No point in registering a device with no ports... */
2169         if (num_ports == 0)
2170                 return NULL;
2171
2172         ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2173         if (!ibdev) {
2174                 dev_err(&dev->persist->pdev->dev,
2175                         "Device struct alloc failed\n");
2176                 return NULL;
2177         }
2178
2179         iboe = &ibdev->iboe;
2180
2181         if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2182                 goto err_dealloc;
2183
2184         if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2185                 goto err_pd;
2186
2187         ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2188                                  PAGE_SIZE);
2189         if (!ibdev->uar_map)
2190                 goto err_uar;
2191         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2192
2193         ibdev->dev = dev;
2194         ibdev->bond_next_port   = 0;
2195
2196         strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2197         ibdev->ib_dev.owner             = THIS_MODULE;
2198         ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
2199         ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
2200         ibdev->num_ports                = num_ports;
2201         ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2202                                                 1 : ibdev->num_ports;
2203         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
2204         ibdev->ib_dev.dma_device        = &dev->persist->pdev->dev;
2205         ibdev->ib_dev.get_netdev        = mlx4_ib_get_netdev;
2206         ibdev->ib_dev.add_gid           = mlx4_ib_add_gid;
2207         ibdev->ib_dev.del_gid           = mlx4_ib_del_gid;
2208
2209         if (dev->caps.userspace_caps)
2210                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2211         else
2212                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2213
2214         ibdev->ib_dev.uverbs_cmd_mask   =
2215                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
2216                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
2217                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
2218                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
2219                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
2220                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
2221                 (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
2222                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
2223                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2224                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
2225                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
2226                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
2227                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
2228                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
2229                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
2230                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
2231                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
2232                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
2233                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
2234                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
2235                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
2236                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
2237                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
2238                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2239
2240         ibdev->ib_dev.query_device      = mlx4_ib_query_device;
2241         ibdev->ib_dev.query_port        = mlx4_ib_query_port;
2242         ibdev->ib_dev.get_link_layer    = mlx4_ib_port_link_layer;
2243         ibdev->ib_dev.query_gid         = mlx4_ib_query_gid;
2244         ibdev->ib_dev.query_pkey        = mlx4_ib_query_pkey;
2245         ibdev->ib_dev.modify_device     = mlx4_ib_modify_device;
2246         ibdev->ib_dev.modify_port       = mlx4_ib_modify_port;
2247         ibdev->ib_dev.alloc_ucontext    = mlx4_ib_alloc_ucontext;
2248         ibdev->ib_dev.dealloc_ucontext  = mlx4_ib_dealloc_ucontext;
2249         ibdev->ib_dev.mmap              = mlx4_ib_mmap;
2250         ibdev->ib_dev.alloc_pd          = mlx4_ib_alloc_pd;
2251         ibdev->ib_dev.dealloc_pd        = mlx4_ib_dealloc_pd;
2252         ibdev->ib_dev.create_ah         = mlx4_ib_create_ah;
2253         ibdev->ib_dev.query_ah          = mlx4_ib_query_ah;
2254         ibdev->ib_dev.destroy_ah        = mlx4_ib_destroy_ah;
2255         ibdev->ib_dev.create_srq        = mlx4_ib_create_srq;
2256         ibdev->ib_dev.modify_srq        = mlx4_ib_modify_srq;
2257         ibdev->ib_dev.query_srq         = mlx4_ib_query_srq;
2258         ibdev->ib_dev.destroy_srq       = mlx4_ib_destroy_srq;
2259         ibdev->ib_dev.post_srq_recv     = mlx4_ib_post_srq_recv;
2260         ibdev->ib_dev.create_qp         = mlx4_ib_create_qp;
2261         ibdev->ib_dev.modify_qp         = mlx4_ib_modify_qp;
2262         ibdev->ib_dev.query_qp          = mlx4_ib_query_qp;
2263         ibdev->ib_dev.destroy_qp        = mlx4_ib_destroy_qp;
2264         ibdev->ib_dev.post_send         = mlx4_ib_post_send;
2265         ibdev->ib_dev.post_recv         = mlx4_ib_post_recv;
2266         ibdev->ib_dev.create_cq         = mlx4_ib_create_cq;
2267         ibdev->ib_dev.modify_cq         = mlx4_ib_modify_cq;
2268         ibdev->ib_dev.resize_cq         = mlx4_ib_resize_cq;
2269         ibdev->ib_dev.destroy_cq        = mlx4_ib_destroy_cq;
2270         ibdev->ib_dev.poll_cq           = mlx4_ib_poll_cq;
2271         ibdev->ib_dev.req_notify_cq     = mlx4_ib_arm_cq;
2272         ibdev->ib_dev.get_dma_mr        = mlx4_ib_get_dma_mr;
2273         ibdev->ib_dev.reg_user_mr       = mlx4_ib_reg_user_mr;
2274         ibdev->ib_dev.rereg_user_mr     = mlx4_ib_rereg_user_mr;
2275         ibdev->ib_dev.dereg_mr          = mlx4_ib_dereg_mr;
2276         ibdev->ib_dev.alloc_mr          = mlx4_ib_alloc_mr;
2277         ibdev->ib_dev.map_mr_sg         = mlx4_ib_map_mr_sg;
2278         ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
2279         ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
2280         ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
2281         ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
2282         ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
2283
2284         if (!mlx4_is_slave(ibdev->dev)) {
2285                 ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
2286                 ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
2287                 ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
2288                 ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
2289         }
2290
2291         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2292             dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2293                 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2294                 ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
2295                 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2296
2297                 ibdev->ib_dev.uverbs_cmd_mask |=
2298                         (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2299                         (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2300         }
2301
2302         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2303                 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2304                 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2305                 ibdev->ib_dev.uverbs_cmd_mask |=
2306                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2307                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2308         }
2309
2310         if (check_flow_steering_support(dev)) {
2311                 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2312                 ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
2313                 ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;
2314
2315                 ibdev->ib_dev.uverbs_ex_cmd_mask        |=
2316                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2317                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2318         }
2319
2320         ibdev->ib_dev.uverbs_ex_cmd_mask |=
2321                 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2322                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2323                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2324
2325         mlx4_ib_alloc_eqs(dev, ibdev);
2326
2327         spin_lock_init(&iboe->lock);
2328
2329         if (init_node_data(ibdev))
2330                 goto err_map;
2331
2332         for (i = 0; i < ibdev->num_ports; ++i) {
2333                 mutex_init(&ibdev->counters_table[i].mutex);
2334                 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2335         }
2336
2337         num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2338         for (i = 0; i < num_req_counters; ++i) {
2339                 mutex_init(&ibdev->qp1_proxy_lock[i]);
2340                 allocated = 0;
2341                 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2342                                                 IB_LINK_LAYER_ETHERNET) {
2343                         err = mlx4_counter_alloc(ibdev->dev, &counter_index);
2344                         /* if allocating a new counter failed, use the default */
2345                         if (err)
2346                                 counter_index =
2347                                         mlx4_get_default_counter_index(dev,
2348                                                                        i + 1);
2349                         else
2350                                 allocated = 1;
2351                 } else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2352                         counter_index = mlx4_get_default_counter_index(dev,
2353                                                                        i + 1);
2354                 }
2355                 new_counter_index = kmalloc(sizeof(*new_counter_index),
2356                                             GFP_KERNEL);
2357                 if (!new_counter_index) {
2358                         if (allocated)
2359                                 mlx4_counter_free(ibdev->dev, counter_index);
2360                         goto err_counter;
2361                 }
2362                 new_counter_index->index = counter_index;
2363                 new_counter_index->allocated = allocated;
2364                 list_add_tail(&new_counter_index->list,
2365                               &ibdev->counters_table[i].counters_list);
2366                 ibdev->counters_table[i].default_counter = counter_index;
2367                 pr_info("counter index %d for port %d allocated %d\n",
2368                         counter_index, i + 1, allocated);
2369         }
2370         if (mlx4_is_bonded(dev))
2371                 for (i = 1; i < ibdev->num_ports ; ++i) {
2372                         new_counter_index =
2373                                         kmalloc(sizeof(struct counter_index),
2374                                                 GFP_KERNEL);
2375                         if (!new_counter_index)
2376                                 goto err_counter;
2377                         new_counter_index->index = counter_index;
2378                         new_counter_index->allocated = 0;
2379                         list_add_tail(&new_counter_index->list,
2380                                       &ibdev->counters_table[i].counters_list);
2381                         ibdev->counters_table[i].default_counter =
2382                                                                 counter_index;
2383                 }
2384
2385         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2386                 ib_num_ports++;
2387
2388         spin_lock_init(&ibdev->sm_lock);
2389         mutex_init(&ibdev->cap_mask_mutex);
2390         INIT_LIST_HEAD(&ibdev->qp_list);
2391         spin_lock_init(&ibdev->reset_flow_resource_lock);
2392
2393         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2394             ib_num_ports) {
2395                 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2396                 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2397                                             MLX4_IB_UC_STEER_QPN_ALIGN,
2398                                             &ibdev->steer_qpn_base, 0);
2399                 if (err)
2400                         goto err_counter;
2401
2402                 ibdev->ib_uc_qpns_bitmap =
2403                         kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2404                                 sizeof(long),
2405                                 GFP_KERNEL);
2406                 if (!ibdev->ib_uc_qpns_bitmap) {
2407                         dev_err(&dev->persist->pdev->dev,
2408                                 "bit map alloc failed\n");
2409                         goto err_steer_qp_release;
2410                 }
2411
2412                 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2413                         bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2414                                     ibdev->steer_qpn_count);
2415                         err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2416                                         dev, ibdev->steer_qpn_base,
2417                                         ibdev->steer_qpn_base +
2418                                         ibdev->steer_qpn_count - 1);
2419                         if (err)
2420                                 goto err_steer_free_bitmap;
2421                 } else {
2422                         bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2423                                     ibdev->steer_qpn_count);
2424                 }
2425         }
2426
2427         for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2428                 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2429
2430         if (ib_register_device(&ibdev->ib_dev, NULL))
2431                 goto err_steer_free_bitmap;
2432
2433         if (mlx4_ib_mad_init(ibdev))
2434                 goto err_reg;
2435
2436         if (mlx4_ib_init_sriov(ibdev))
2437                 goto err_mad;
2438
2439         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
2440                 if (!iboe->nb.notifier_call) {
2441                         iboe->nb.notifier_call = mlx4_ib_netdev_event;
2442                         err = register_netdevice_notifier(&iboe->nb);
2443                         if (err) {
2444                                 iboe->nb.notifier_call = NULL;
2445                                 goto err_notif;
2446                         }
2447                 }
2448         }
2449
2450         for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2451                 if (device_create_file(&ibdev->ib_dev.dev,
2452                                        mlx4_class_attributes[j]))
2453                         goto err_notif;
2454         }
2455
2456         ibdev->ib_active = true;
2457
2458         if (mlx4_is_mfunc(ibdev->dev))
2459                 init_pkeys(ibdev);
2460
2461         /* create paravirt contexts for any VFs which are active */
2462         if (mlx4_is_master(ibdev->dev)) {
2463                 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2464                         if (j == mlx4_master_func_num(ibdev->dev))
2465                                 continue;
2466                         if (mlx4_is_slave_active(ibdev->dev, j))
2467                                 do_slave_init(ibdev, j, 1);
2468                 }
2469         }
2470         return ibdev;
2471
2472 err_notif:
2473         if (ibdev->iboe.nb.notifier_call) {
2474                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2475                         pr_warn("failure unregistering notifier\n");
2476                 ibdev->iboe.nb.notifier_call = NULL;
2477         }
2478         flush_workqueue(wq);
2479
2480         mlx4_ib_close_sriov(ibdev);
2481
2482 err_mad:
2483         mlx4_ib_mad_cleanup(ibdev);
2484
2485 err_reg:
2486         ib_unregister_device(&ibdev->ib_dev);
2487
2488 err_steer_free_bitmap:
2489         kfree(ibdev->ib_uc_qpns_bitmap);
2490
2491 err_steer_qp_release:
2492         mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2493                               ibdev->steer_qpn_count);
2494 err_counter:
2495         for (i = 0; i < ibdev->num_ports; ++i)
2496                 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2497
2498 err_map:
2499         mlx4_ib_free_eqs(dev, ibdev);
2500         iounmap(ibdev->uar_map);
2501
2502 err_uar:
2503         mlx4_uar_free(dev, &ibdev->priv_uar);
2504
2505 err_pd:
2506         mlx4_pd_free(dev, ibdev->priv_pdn);
2507
2508 err_dealloc:
2509         ib_dealloc_device(&ibdev->ib_dev);
2510
2511         return NULL;
2512 }
2513
2514 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2515 {
2516         int offset;
2517
2518         WARN_ON(!dev->ib_uc_qpns_bitmap);
2519
2520         offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2521                                          dev->steer_qpn_count,
2522                                          get_count_order(count));
2523         if (offset < 0)
2524                 return offset;
2525
2526         *qpn = dev->steer_qpn_base + offset;
2527         return 0;
2528 }
2529
2530 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2531 {
2532         if (!qpn ||
2533             dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2534                 return;
2535
2536         BUG_ON(qpn < dev->steer_qpn_base);
2537
2538         bitmap_release_region(dev->ib_uc_qpns_bitmap,
2539                               qpn - dev->steer_qpn_base,
2540                               get_count_order(count));
2541 }
2542
2543 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2544                          int is_attach)
2545 {
2546         int err;
2547         size_t flow_size;
2548         struct ib_flow_attr *flow = NULL;
2549         struct ib_flow_spec_ib *ib_spec;
2550
2551         if (is_attach) {
2552                 flow_size = sizeof(struct ib_flow_attr) +
2553                             sizeof(struct ib_flow_spec_ib);
2554                 flow = kzalloc(flow_size, GFP_KERNEL);
2555                 if (!flow)
2556                         return -ENOMEM;
2557                 flow->port = mqp->port;
2558                 flow->num_of_specs = 1;
2559                 flow->size = flow_size;
2560                 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2561                 ib_spec->type = IB_FLOW_SPEC_IB;
2562                 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2563                 /* Add an empty rule for IB L2 */
2564                 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2565
2566                 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2567                                             IB_FLOW_DOMAIN_NIC,
2568                                             MLX4_FS_REGULAR,
2569                                             &mqp->reg_id);
2570         } else {
2571                 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2572         }
2573         kfree(flow);
2574         return err;
2575 }
2576
2577 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2578 {
2579         struct mlx4_ib_dev *ibdev = ibdev_ptr;
2580         int p;
2581
2582         ibdev->ib_active = false;
2583         flush_workqueue(wq);
2584
2585         mlx4_ib_close_sriov(ibdev);
2586         mlx4_ib_mad_cleanup(ibdev);
2587         ib_unregister_device(&ibdev->ib_dev);
2588         if (ibdev->iboe.nb.notifier_call) {
2589                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2590                         pr_warn("failure unregistering notifier\n");
2591                 ibdev->iboe.nb.notifier_call = NULL;
2592         }
2593
2594         mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2595                               ibdev->steer_qpn_count);
2596         kfree(ibdev->ib_uc_qpns_bitmap);
2597
2598         iounmap(ibdev->uar_map);
2599         for (p = 0; p < ibdev->num_ports; ++p)
2600                 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2601
2602         mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2603                 mlx4_CLOSE_PORT(dev, p);
2604
2605         mlx4_ib_free_eqs(dev, ibdev);
2606
2607         mlx4_uar_free(dev, &ibdev->priv_uar);
2608         mlx4_pd_free(dev, ibdev->priv_pdn);
2609         ib_dealloc_device(&ibdev->ib_dev);
2610 }
2611
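/*
 * On the master, queue one work item per active port of the given slave to
 * set up (do_init != 0) or tear down its tunnel (paravirt) QPs.  The work
 * is dropped if SRIOV teardown is already in progress.
 */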
2612 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2613 {
2614         struct mlx4_ib_demux_work **dm = NULL;
2615         struct mlx4_dev *dev = ibdev->dev;
2616         int i;
2617         unsigned long flags;
2618         struct mlx4_active_ports actv_ports;
2619         unsigned int ports;
2620         unsigned int first_port;
2621
2622         if (!mlx4_is_master(dev))
2623                 return;
2624
2625         actv_ports = mlx4_get_active_ports(dev, slave);
2626         ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2627         first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2628
2629         dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2630         if (!dm) {
2631                 pr_err("failed to allocate memory for tunneling qp update\n");
2632                 return;
2633         }
2634
2635         for (i = 0; i < ports; i++) {
2636                 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2637                 if (!dm[i]) {
2638                         pr_err("failed to allocate memory for tunneling qp update work struct\n");
2639                         while (--i >= 0)
2640                                 kfree(dm[i]);
2641                         goto out;
2642                 }
2643                 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2644                 dm[i]->port = first_port + i + 1;
2645                 dm[i]->slave = slave;
2646                 dm[i]->do_init = do_init;
2647                 dm[i]->dev = ibdev;
2648         }
2649         /* initialize or tear down tunnel QPs for the slave */
2650         spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2651         if (!ibdev->sriov.is_going_down) {
2652                 for (i = 0; i < ports; i++)
2653                         queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2654                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2655         } else {
2656                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2657                 for (i = 0; i < ports; i++)
2658                         kfree(dm[i]);
2659         }
2660 out:
2661         kfree(dm);
2662         return;
2663 }
2664
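/*
 * Catastrophic error handling: walk every QP on the device and invoke the
 * completion handler once for each CQ that still has outstanding send or
 * receive work, so that consumers can drain their queues.
 */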
2665 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
2666 {
2667         struct mlx4_ib_qp *mqp;
2668         unsigned long flags_qp;
2669         unsigned long flags_cq;
2670         struct mlx4_ib_cq *send_mcq, *recv_mcq;
2671         struct list_head    cq_notify_list;
2672         struct mlx4_cq *mcq;
2673         unsigned long flags;
2674
2675         pr_warn("mlx4_ib_handle_catas_error started\n");
2676         INIT_LIST_HEAD(&cq_notify_list);
2677
2678         /* Go over the qp list residing on this ibdev, syncing with QP create/destroy. */
2679         spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2680
2681         list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2682                 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2683                 if (mqp->sq.tail != mqp->sq.head) {
2684                         send_mcq = to_mcq(mqp->ibqp.send_cq);
2685                         spin_lock_irqsave(&send_mcq->lock, flags_cq);
2686                         if (send_mcq->mcq.comp &&
2687                             mqp->ibqp.send_cq->comp_handler) {
2688                                 if (!send_mcq->mcq.reset_notify_added) {
2689                                         send_mcq->mcq.reset_notify_added = 1;
2690                                         list_add_tail(&send_mcq->mcq.reset_notify,
2691                                                       &cq_notify_list);
2692                                 }
2693                         }
2694                         spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2695                 }
2696                 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2697                 /* Now, handle the QP's receive queue */
2698                 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2699                 /* no handling is needed for SRQ */
2700                 if (!mqp->ibqp.srq) {
2701                         if (mqp->rq.tail != mqp->rq.head) {
2702                                 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2703                                 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2704                                 if (recv_mcq->mcq.comp &&
2705                                     mqp->ibqp.recv_cq->comp_handler) {
2706                                         if (!recv_mcq->mcq.reset_notify_added) {
2707                                                 recv_mcq->mcq.reset_notify_added = 1;
2708                                                 list_add_tail(&recv_mcq->mcq.reset_notify,
2709                                                               &cq_notify_list);
2710                                         }
2711                                 }
2712                                 spin_unlock_irqrestore(&recv_mcq->lock,
2713                                                        flags_cq);
2714                         }
2715                 }
2716                 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2717         }
2718
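        /* Invoke the completion handler once for every CQ collected above. */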
2719         list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
2720                 mcq->comp(mcq);
2721         }
2722         spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
2723         pr_warn("mlx4_ib_handle_catas_error ended\n");
2724 }
2725
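/*
 * With the two ports bonded (LAG), the HCA is exposed as a single IB port 1.
 * Recompute the bonded port state from the slave netdevs: ACTIVE if at least
 * one netdev is running with carrier, DOWN otherwise, and report the result
 * as an IB_EVENT_PORT_ACTIVE or IB_EVENT_PORT_ERR event on port 1.
 */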
2726 static void handle_bonded_port_state_event(struct work_struct *work)
2727 {
2728         struct ib_event_work *ew =
2729                 container_of(work, struct ib_event_work, work);
2730         struct mlx4_ib_dev *ibdev = ew->ib_dev;
2731         enum ib_port_state bonded_port_state = IB_PORT_NOP;
2732         int i;
2733         struct ib_event ibev;
2734
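        /* ew only carried ib_dev, which was copied above, so free it now. */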
2735         kfree(ew);
2736         spin_lock_bh(&ibdev->iboe.lock);
2737         for (i = 0; i < MLX4_MAX_PORTS; ++i) {
2738                 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
2739                 enum ib_port_state curr_port_state;
2740
2741                 if (!curr_netdev)
2742                         continue;
2743
2744                 curr_port_state =
2745                         (netif_running(curr_netdev) &&
2746                          netif_carrier_ok(curr_netdev)) ?
2747                         IB_PORT_ACTIVE : IB_PORT_DOWN;
2748
2749                 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
2750                         curr_port_state : IB_PORT_ACTIVE;
2751         }
2752         spin_unlock_bh(&ibdev->iboe.lock);
2753
2754         ibev.device = &ibdev->ib_dev;
2755         ibev.element.port_num = 1;
2756         ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
2757                 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2758
2759         ib_dispatch_event(&ibev);
2760 }
2761
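/*
 * Event handler registered with the mlx4 core (via mlx4_ib_interface below):
 * translates low-level device events into IB events.  Some handling is
 * deferred to the "mlx4_ib" workqueue: bonded port-state recomputation, and
 * port management changes when acting as the SR-IOV master.
 */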
2762 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2763                           enum mlx4_dev_event event, unsigned long param)
2764 {
2765         struct ib_event ibev;
2766         struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2767         struct mlx4_eqe *eqe = NULL;
2768         struct ib_event_work *ew;
2769         int p = 0;
2770
2771         if (mlx4_is_bonded(dev) &&
2772             ((event == MLX4_DEV_EVENT_PORT_UP) ||
2773             (event == MLX4_DEV_EVENT_PORT_DOWN))) {
2774                 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
2775                 if (!ew)
2776                         return;
2777                 INIT_WORK(&ew->work, handle_bonded_port_state_event);
2778                 ew->ib_dev = ibdev;
2779                 queue_work(wq, &ew->work);
2780                 return;
2781         }
2782
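        /*
         * For PORT_MGMT_CHANGE events 'param' is a pointer to the EQE;
         * for the remaining events it is a port number or a slave id
         * (see the per-case comments below).
         */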
2783         if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2784                 eqe = (struct mlx4_eqe *)param;
2785         else
2786                 p = (int) param;
2787
2788         switch (event) {
2789         case MLX4_DEV_EVENT_PORT_UP:
2790                 if (p > ibdev->num_ports)
2791                         return;
2792                 if (mlx4_is_master(dev) &&
2793                     rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2794                         IB_LINK_LAYER_INFINIBAND) {
2795                         mlx4_ib_invalidate_all_guid_record(ibdev, p);
2796                 }
2797                 ibev.event = IB_EVENT_PORT_ACTIVE;
2798                 break;
2799
2800         case MLX4_DEV_EVENT_PORT_DOWN:
2801                 if (p > ibdev->num_ports)
2802                         return;
2803                 ibev.event = IB_EVENT_PORT_ERR;
2804                 break;
2805
2806         case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2807                 ibdev->ib_active = false;
2808                 ibev.event = IB_EVENT_DEVICE_FATAL;
2809                 mlx4_ib_handle_catas_error(ibdev);
2810                 break;
2811
2812         case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2813                 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
2814                 if (!ew) {
2815                         pr_err("failed to allocate memory for events work\n");
2816                         break;
2817                 }
2818
2819                 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2820                 memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
2821                 ew->ib_dev = ibdev;
2822                 /* need to queue only for port owner, which uses GEN_EQE */
2823                 if (mlx4_is_master(dev))
2824                         queue_work(wq, &ew->work);
2825                 else
2826                         handle_port_mgmt_change_event(&ew->work);
2827                 return;
2828
2829         case MLX4_DEV_EVENT_SLAVE_INIT:
2830                 /* here, p is the slave id */
2831                 do_slave_init(ibdev, p, 1);
2832                 if (mlx4_is_master(dev)) {
2833                         int i;
2834
2835                         for (i = 1; i <= ibdev->num_ports; i++) {
2836                                 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
2837                                         == IB_LINK_LAYER_INFINIBAND)
2838                                         mlx4_ib_slave_alias_guid_event(ibdev,
2839                                                                        p, i,
2840                                                                        1);
2841                         }
2842                 }
2843                 return;
2844
2845         case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2846                 if (mlx4_is_master(dev)) {
2847                         int i;
2848
2849                         for (i = 1; i <= ibdev->num_ports; i++) {
2850                                 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
2851                                         == IB_LINK_LAYER_INFINIBAND)
2852                                         mlx4_ib_slave_alias_guid_event(ibdev,
2853                                                                        p, i,
2854                                                                        0);
2855                         }
2856                 }
2857                 /* here, p is the slave id */
2858                 do_slave_init(ibdev, p, 0);
2859                 return;
2860
2861         default:
2862                 return;
2863         }
2864
2865         ibev.device           = ibdev_ptr;
2866         ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
2867
2868         ib_dispatch_event(&ibev);
2869 }
2870
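/*
 * Registration hooks for the mlx4 core driver.  MLX4_INTFF_BONDING marks
 * this interface as bonding (LAG) aware, so the mlx4 core does not need to
 * detach it when the two ports of a device are bonded into one.
 */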
2871 static struct mlx4_interface mlx4_ib_interface = {
2872         .add            = mlx4_ib_add,
2873         .remove         = mlx4_ib_remove,
2874         .event          = mlx4_ib_event,
2875         .protocol       = MLX4_PROT_IB_IPV6,
2876         .flags          = MLX4_INTFF_BONDING
2877 };
2878
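/*
 * Module init: create the single-threaded "mlx4_ib" workqueue used for
 * deferred event handling, initialize the multicast group (MCG) machinery,
 * then register with the mlx4 core, which calls mlx4_ib_add() for every
 * ConnectX device it already manages.
 */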
2879 static int __init mlx4_ib_init(void)
2880 {
2881         int err;
2882
2883         wq = create_singlethread_workqueue("mlx4_ib");
2884         if (!wq)
2885                 return -ENOMEM;
2886
2887         err = mlx4_ib_mcg_init();
2888         if (err)
2889                 goto clean_wq;
2890
2891         err = mlx4_register_interface(&mlx4_ib_interface);
2892         if (err)
2893                 goto clean_mcg;
2894
2895         return 0;
2896
2897 clean_mcg:
2898         mlx4_ib_mcg_destroy();
2899
2900 clean_wq:
2901         destroy_workqueue(wq);
2902         return err;
2903 }
2904
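/* Module exit: undo mlx4_ib_init() in reverse order. */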
2905 static void __exit mlx4_ib_cleanup(void)
2906 {
2907         mlx4_unregister_interface(&mlx4_ib_interface);
2908         mlx4_ib_mcg_destroy();
2909         destroy_workqueue(wq);
2910 }
2911
2912 module_init(mlx4_ib_init);
2913 module_exit(mlx4_ib_cleanup);