/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
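
/*
 * Determine whether device-managed flow steering (DMFS) can be
 * advertised: it requires device-managed steering mode, an Ethernet
 * port with the FS_EN capability, and either no IB ports or DMFS_IPOIB
 * support.  DMFS is not exposed for IB ports in a multi-function
 * (SR-IOV) environment.
 */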
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;
	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(eth_num_ports &&
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}
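
/*
 * Resolve the netdev that currently backs an Ethernet (RoCE) port.
 * When the ports are bonded, the active slave of the bond master is
 * returned instead of the raw slave device.
 */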
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}
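
/*
 * Helpers that push the cached GID table to the firmware via SET_PORT.
 * The _v1 variant programs plain GIDs only; the _v1_v2 variant also
 * records, per entry, whether the GID is RoCE v2 and whether it is an
 * IPv4-mapped address, for devices with ROCE_V1_V2 support.  When the
 * ports are bonded the same table is mirrored to port 2 as well.
 */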
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
static void free_gid_entry(struct gid_entry *entry)
{
	memset(&entry->gid, 0, sizeof(entry->gid));
	kfree(entry->ctx);
	entry->ctx = NULL;
}
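
/*
 * add_gid/del_gid maintain a per-port software GID cache under
 * iboe->lock.  Entries are reference counted; the hardware table is
 * only rewritten (mlx4_ib_update_gids) when an entry is actually added
 * to or removed from the cache.
 */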
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type) {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid,
				       &attr->gid, sizeof(attr->gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;

		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		if (ret) {
			spin_lock_bh(&iboe->lock);
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
			spin_unlock_bh(&iboe->lock);
		}
		kfree(gids);
	}

	return ret;
}
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			free_gid_entry(&port_gid_table->gids[real_index]);
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}
	return ret;
}
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u8 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}
#define field_avail(type, fld, sz) (offsetof(type, fld) + \
				    sizeof(((type *)0)->fld) <= (sz))
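
/*
 * query_device: device attributes are gathered from the NODE_INFO MAD
 * and the cached firmware capabilities.  The extended (uverbs_ex)
 * response is filled incrementally; field_avail()/response_length
 * guard each optional field against older user-space buffers.
 */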
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
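
/*
 * Port attributes for RoCE ports are derived from QUERY_PORT and from
 * the state of the underlying netdev (or the bond master when the
 * ports are bonded), rather than from PortInfo MADs.
 */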
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
			      (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */
	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
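
/*
 * GID 0 is read from the PortInfo and GUIDInfo MADs.  netw_view
 * selects the network (SM) view instead of the host view when running
 * on a multi-function device; in the host view only index 0 returns a
 * real GUID.
 */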
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	return -ENODEV;
}

static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
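
/*
 * Only IB_DEVICE_MODIFY_NODE_DESC is supported here: the new node
 * description is stored locally and, when possible, passed to the
 * firmware with SET_NODE so it can generate a trap 144.
 */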
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static void mlx4_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA.
	 * This is done through either mremap flow or split_vma (usually due
	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
	 * vma, as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original vma size is exactly a single page that there will be no
	 * "splitting" operations on.
	 */
	area->vm_ops = NULL;
}

static void mlx4_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting.  The exiting case is handled explicitly as part
	 * of mlx4_ib_disassociate_ucontext.
	 */
	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
				area->vm_private_data;

	/* set the vma context pointer to null in the mlx4_ib driver's private
	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
	 */
	mlx4_ib_vma_priv_data->vma = NULL;
}

static const struct vm_operations_struct mlx4_ib_vm_ops = {
	.open = mlx4_ib_vma_open,
	.close = mlx4_ib_vma_close
};
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int i;
	struct vm_area_struct *vma;
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	/* need to protect from a race on closing the vma as part of
	 * mlx4_ib_vma_close().
	 */
	for (i = 0; i < HW_BAR_COUNT; i++) {
		vma = context->hw_bar_info[i].vma;
		if (!vma)
			continue;

		zap_vma_ptes(context->hw_bar_info[i].vma,
			     context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);

		context->hw_bar_info[i].vma->vm_flags &=
			~(VM_SHARED | VM_MAYSHARE);
		/* context going to be destroyed, should not access ops any more */
		context->hw_bar_info[i].vma->vm_ops = NULL;
	}
}
static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
				 struct mlx4_ib_vma_private_data *vma_private_data)
{
	vma_private_data->vma = vma;
	vma->vm_private_data = vma_private_data;
	vma->vm_ops = &mlx4_ib_vm_ops;
}
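
/*
 * mmap offsets exposed to user space: page offset 0 maps the doorbell
 * (UAR) page, offset 1 the BlueFlame register page (when bf_reg_size
 * is non-zero) and offset 3 the internal HW clock.  Each may be mapped
 * only once per context; the VMA is tracked so it can be zapped on
 * disassociate.
 */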
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);

	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
			return -EINVAL;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);

	} else if (vma->vm_pgoff == 3) {
		struct mlx4_clock_params params;
		int ret;

		/* We prevent double mmaping on same context */
		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
			return -EINVAL;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (pci_resource_start(dev->dev->persist->pdev,
							   params.bar) +
					params.offset)
				       >> PAGE_SHIFT,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx4_ib_set_vma_data(vma,
				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
	} else {
		return -EINVAL;
	}

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
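
/*
 * Translate a single ib_flow_spec into the mlx4 hardware rule layout.
 * Mask bits past the last supported field must be zero (see
 * FIELDS_NOT_SUPPORTED above); the return value is the size of the
 * hardware spec that was written.
 */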
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		union ib_flow_spec ib_spec = {};
		int ret;

		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
	    flow_attr->num_of_specs == 1) {
		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
		enum ib_flow_spec_type header_spec =
			((union ib_flow_spec *)(flow_attr + 1))->type;

		if (header_spec == IB_FLOW_SPEC_ETH)
			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}
static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type != IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if all is zero than MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* Above xor was only on MC bit, non empty mask is valid
			 * only if this bit is set and rest are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
				    struct ib_flow_attr *flow_attr,
				    int domain, struct ib_udata *udata)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
		return ERR_PTR(-EOPNOTSUPP);

	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);

	if (udata &&
	    udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If dont trap flag (continue match) is set, under specific
		 * condition traffic be replicated to given qp,
		 * without stealing it
		 */
		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
			err = mlx4_ib_add_dont_trap_rule(dev,
							 flow_attr,
							 type);
			if (err)
				goto err_free;
		} else {
			type[0] = MLX4_FS_REGULAR;
		}
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_MIRROR_RX_PORT;
		type[1] = MLX4_FS_MIRROR_SX_PORT;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
					    &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    domain, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}

		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;

		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}

	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev	*dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id	reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;

	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,   NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,   NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

struct diag_counter {
	const char *name;
	u32 offset;
};

#define DIAG_COUNTER(_name, _offset)			\
	{ .name = #_name, .offset = _offset }
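
/*
 * Diagnostic counter layout: diag_basic is always available, diag_ext
 * requires the DIAG_PER_PORT capability, and diag_device_only entries
 * exist only in the per-device (port 0) counter set.
 */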
static const struct diag_counter diag_basic[] = {
	DIAG_COUNTER(rq_num_lle, 0x00),
	DIAG_COUNTER(sq_num_lle, 0x04),
	DIAG_COUNTER(rq_num_lqpoe, 0x08),
	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
	DIAG_COUNTER(rq_num_lpe, 0x18),
	DIAG_COUNTER(sq_num_lpe, 0x1C),
	DIAG_COUNTER(rq_num_wrfe, 0x20),
	DIAG_COUNTER(sq_num_wrfe, 0x24),
	DIAG_COUNTER(sq_num_mwbe, 0x2C),
	DIAG_COUNTER(sq_num_bre, 0x34),
	DIAG_COUNTER(sq_num_rire, 0x44),
	DIAG_COUNTER(rq_num_rire, 0x48),
	DIAG_COUNTER(sq_num_rae, 0x4C),
	DIAG_COUNTER(rq_num_rae, 0x50),
	DIAG_COUNTER(sq_num_roe, 0x54),
	DIAG_COUNTER(sq_num_tree, 0x5C),
	DIAG_COUNTER(sq_num_rree, 0x64),
	DIAG_COUNTER(rq_num_rnr, 0x68),
	DIAG_COUNTER(sq_num_rnr, 0x6C),
	DIAG_COUNTER(rq_num_oos, 0x100),
	DIAG_COUNTER(sq_num_oos, 0x104),
};

static const struct diag_counter diag_ext[] = {
	DIAG_COUNTER(rq_num_dup, 0x130),
	DIAG_COUNTER(sq_num_to, 0x134),
};

static const struct diag_counter diag_device_only[] = {
	DIAG_COUNTER(num_cqovf, 0x1A0),
	DIAG_COUNTER(rq_num_udsdprd, 0x118),
};
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;

	if (!diag[!!port_num].name)
		return NULL;

	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
					  diag[!!port_num].num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
	u32 hw_value[ARRAY_SIZE(diag_device_only) +
		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
	int ret;
	int i;

	ret = mlx4_query_diag_counters(dev->dev,
				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
				       diag[!!port].offset, hw_value,
				       diag[!!port].num_counters, port);

	if (ret)
		return ret;

	for (i = 0; i < diag[!!port].num_counters; i++)
		stats->value[i] = hw_value[i];

	return diag[!!port].num_counters;
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
					 const char ***name,
					 u32 **offset,
					 u32 *num,
					 bool port)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(diag_basic);

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
		num_counters += ARRAY_SIZE(diag_ext);

	if (!port)
		num_counters += ARRAY_SIZE(diag_device_only);

	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
	if (!*name)
		return -ENOMEM;

	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
	if (!*offset)
		goto err;

	*num = num_counters;

	return 0;

err:
	kfree(*name);
	return -ENOMEM;
}
2295 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2296 const char **name,
2297 u32 *offset,
2298 bool port)
2299 {
2300 int i;
2301 int j;
2303 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2304 name[i] = diag_basic[i].name;
2305 offset[i] = diag_basic[i].offset;
2306 }
2308 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2309 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2310 name[j] = diag_ext[i].name;
2311 offset[j] = diag_ext[i].offset;
2312 }
2313 }
2315 if (!port) {
2316 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2317 name[j] = diag_device_only[i].name;
2318 offset[j] = diag_device_only[i].offset;
2319 }
2320 }
2321 }
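/*
 * Allocate and register the hw_stats hooks. Slaves (VFs) do not register
 * them; on error the name/offset arrays of the previous counter set are
 * freed before returning.
 */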
2323 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2324 {
2325 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2326 int i;
2327 int ret;
2328 bool per_port = !!(ibdev->dev->caps.flags2 &
2329 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2331 if (mlx4_is_slave(ibdev->dev))
2332 return 0;
2334 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2335 /* i == 1 means we are building port counters */
2336 if (i && !per_port)
2337 continue;
2339 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2340 &diag[i].offset,
2341 &diag[i].num_counters, i);
2342 if (ret)
2343 goto err_alloc;
2345 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2346 diag[i].offset, i);
2347 }
2349 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
2350 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
2352 return 0;
2354 err_alloc:
2355 if (i) {
2356 kfree(diag[i - 1].name);
2357 kfree(diag[i - 1].offset);
2358 }
2360 return ret;
2361 }
2363 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2364 {
2365 int i;
2367 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2368 kfree(ibdev->diag_counters[i].offset);
2369 kfree(ibdev->diag_counters[i].name);
2370 }
2371 }
2373 #define MLX4_IB_INVALID_MAC ((u64)-1)
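/*
 * Propagate a netdev MAC address change to the port's proxy QP1 under
 * SR-IOV: register the new source MAC, update the QP, and release whichever
 * MAC is no longer in use.
 */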
2374 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2375 struct net_device *dev,
2376 int port)
2377 {
2378 u64 new_smac = 0;
2379 u64 release_mac = MLX4_IB_INVALID_MAC;
2380 struct mlx4_ib_qp *qp;
2382 read_lock(&dev_base_lock);
2383 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2384 read_unlock(&dev_base_lock);
2386 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2388 /* no need for update QP1 and mac registration in non-SRIOV */
2389 if (!mlx4_is_mfunc(ibdev->dev))
2390 return;
2392 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2393 qp = ibdev->qp1_proxy[port - 1];
2394 if (qp) {
2395 int new_smac_index;
2396 u64 old_smac;
2397 struct mlx4_update_qp_params update_params;
2399 mutex_lock(&qp->mutex);
2400 old_smac = qp->pri.smac;
2401 if (new_smac == old_smac)
2402 goto unlock;
2404 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2406 if (new_smac_index < 0)
2407 goto unlock;
2409 update_params.smac_index = new_smac_index;
2410 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2411 &update_params)) {
2412 release_mac = new_smac;
2413 goto unlock;
2414 }
2415 /* if old port was zero, no mac was yet registered for this QP */
2416 if (qp->pri.smac_port)
2417 release_mac = old_smac;
2418 qp->pri.smac = new_smac;
2419 qp->pri.smac_port = port;
2420 qp->pri.smac_index = new_smac_index;
2421 }
2423 unlock:
2424 if (release_mac != MLX4_IB_INVALID_MAC)
2425 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2426 if (qp)
2427 mutex_unlock(&qp->mutex);
2428 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2429 }
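/*
 * Re-cache the Ethernet netdevs behind each IBoE port and, if the event
 * affects the netdev bound to a port, refresh that port's QP1 source MAC.
 */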
2431 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2432 struct net_device *dev,
2433 unsigned long event)
2435 {
2436 struct mlx4_ib_iboe *iboe;
2437 int update_qps_port = -1;
2438 int port;
2440 ASSERT_RTNL();
2442 iboe = &ibdev->iboe;
2444 spin_lock_bh(&iboe->lock);
2445 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2447 iboe->netdevs[port - 1] =
2448 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2450 if (dev == iboe->netdevs[port - 1] &&
2451 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2452 event == NETDEV_UP || event == NETDEV_CHANGE))
2453 update_qps_port = port;
2455 }
2456 spin_unlock_bh(&iboe->lock);
2458 if (update_qps_port > 0)
2459 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2460 }
2462 static int mlx4_ib_netdev_event(struct notifier_block *this,
2463 unsigned long event, void *ptr)
2464 {
2465 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2466 struct mlx4_ib_dev *ibdev;
2468 if (!net_eq(dev_net(dev), &init_net))
2469 return NOTIFY_DONE;
2471 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2472 mlx4_ib_scan_netdevs(ibdev, dev, event);
2474 return NOTIFY_DONE;
2475 }
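/*
 * Build the virtual-to-physical P_Key mapping for every function and sync
 * it to the firmware, then seed the P_Key cache. Only the master (PF)
 * populates these tables.
 */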
2477 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2478 {
2479 int port;
2480 int slave;
2481 int i;
2483 if (mlx4_is_master(ibdev->dev)) {
2484 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2485 ++slave) {
2486 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2487 for (i = 0;
2488 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2489 ++i) {
2490 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2491 /* master has the identity virt2phys pkey mapping */
2492 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2493 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2494 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2495 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2496 }
2497 }
2498 }
2499 /* initialize pkey cache */
2500 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2501 for (i = 0;
2502 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2503 ++i)
2504 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2505 ibdev->dev->phys_caps.pkey_table[port][i];
2506 }
2507 }
2508 }
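/*
 * Spread the available completion EQs across the ports and advertise the
 * number actually assigned as this device's completion vector count.
 */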
2510 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2511 {
2512 int i, j, eq = 0, total_eqs = 0;
2514 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2515 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2516 if (!ibdev->eq_table)
2517 return;
2519 for (i = 1; i <= dev->caps.num_ports; i++) {
2520 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2521 j++, total_eqs++) {
2522 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2523 continue;
2524 ibdev->eq_table[eq] = total_eqs;
2525 if (!mlx4_assign_eq(dev, i,
2526 &ibdev->eq_table[eq]))
2527 eq++;
2528 else
2529 ibdev->eq_table[eq] = -1;
2530 }
2531 }
2533 for (i = eq; i < dev->caps.num_comp_vectors;
2534 ibdev->eq_table[i++] = -1)
2535 ;
2537 /* Advertise the new number of EQs to clients */
2538 ibdev->ib_dev.num_comp_vectors = eq;
2539 }
2541 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2542 {
2543 int i;
2544 int total_eqs = ibdev->ib_dev.num_comp_vectors;
2546 /* no eqs were allocated */
2547 if (!ibdev->eq_table)
2548 return;
2550 /* Reset the advertised EQ number */
2551 ibdev->ib_dev.num_comp_vectors = 0;
2553 for (i = 0; i < total_eqs; i++)
2554 mlx4_release_eq(dev, ibdev->eq_table[i]);
2556 kfree(ibdev->eq_table);
2557 ibdev->eq_table = NULL;
2558 }
2560 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2561 struct ib_port_immutable *immutable)
2562 {
2563 struct ib_port_attr attr;
2564 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2565 int err;
2567 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2568 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2569 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2570 } else {
2571 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2572 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2573 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2574 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2575 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2576 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2577 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2578 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2579 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2580 }
2582 err = ib_query_port(ibdev, port_num, &attr);
2583 if (err)
2584 return err;
2586 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2587 immutable->gid_tbl_len = attr.gid_tbl_len;
2589 return 0;
2590 }
2592 static void get_fw_ver_str(struct ib_device *device, char *str)
2593 {
2594 struct mlx4_ib_dev *dev =
2595 container_of(device, struct mlx4_ib_dev, ib_dev);
2596 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2597 (int) (dev->dev->caps.fw_ver >> 32),
2598 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2599 (int) dev->dev->caps.fw_ver & 0xffff);
2600 }
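/*
 * Main probe path: allocate the IB device, wire up the verbs callbacks and
 * optional features (memory windows, XRC, flow steering, RSS work queues),
 * allocate counters, EQs and the steering QPN range, register with the RDMA
 * core and the netdev notifier, and expose the sysfs attributes.
 */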
2602 static void *mlx4_ib_add(struct mlx4_dev *dev)
2604 struct mlx4_ib_dev *ibdev;
2608 struct mlx4_ib_iboe *iboe;
2609 int ib_num_ports = 0;
2610 int num_req_counters;
2613 struct counter_index *new_counter_index = NULL;
2615 pr_info_once("%s", mlx4_ib_version);
2618 mlx4_foreach_ib_transport_port(i, dev)
2621 /* No point in registering a device with no ports... */
2625 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2627 dev_err(&dev->persist->pdev->dev,
2628 "Device struct alloc failed\n");
2632 iboe = &ibdev->iboe;
2634 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2637 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2640 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2642 if (!ibdev->uar_map)
2644 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2647 ibdev->bond_next_port = 0;
2649 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2650 ibdev->ib_dev.owner = THIS_MODULE;
2651 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2652 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2653 ibdev->num_ports = num_ports;
2654 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2655 1 : ibdev->num_ports;
2656 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2657 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
2658 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2659 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2660 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
2662 if (dev->caps.userspace_caps)
2663 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2665 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2667 ibdev->ib_dev.uverbs_cmd_mask =
2668 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2669 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2670 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2671 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2672 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2673 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2674 (1ull << IB_USER_VERBS_CMD_REREG_MR) |
2675 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2676 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2677 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2678 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2679 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2680 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2681 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2682 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2683 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2684 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2685 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2686 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2687 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2688 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2689 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2690 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2691 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2693 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2694 ibdev->ib_dev.query_port = mlx4_ib_query_port;
2695 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
2696 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2697 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2698 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2699 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2700 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2701 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2702 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2703 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2704 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2705 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2706 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2707 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2708 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2709 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
2710 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
2711 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2712 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2713 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2714 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
2715 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
2716 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2717 ibdev->ib_dev.drain_sq = mlx4_ib_drain_sq;
2718 ibdev->ib_dev.drain_rq = mlx4_ib_drain_rq;
2719 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2720 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2721 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
2722 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
2723 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
2724 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2725 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2726 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2727 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2728 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
2729 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
2730 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
2731 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
2732 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
2733 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2734 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2735 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
2736 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
2737 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
2738 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
2740 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2741 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
2743 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2744 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2745 IB_LINK_LAYER_ETHERNET) ||
2746 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2747 IB_LINK_LAYER_ETHERNET))) {
2748 ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
2749 ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
2750 ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
2751 ibdev->ib_dev.create_rwq_ind_table =
2752 mlx4_ib_create_rwq_ind_table;
2753 ibdev->ib_dev.destroy_rwq_ind_table =
2754 mlx4_ib_destroy_rwq_ind_table;
2755 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2756 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
2757 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
2758 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
2759 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2760 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
2763 if (!mlx4_is_slave(ibdev->dev)) {
2764 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2765 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2766 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2767 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2770 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2771 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2772 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2773 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2775 ibdev->ib_dev.uverbs_cmd_mask |=
2776 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2777 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2780 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2781 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2782 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2783 ibdev->ib_dev.uverbs_cmd_mask |=
2784 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2785 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2788 if (check_flow_steering_support(dev)) {
2789 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2790 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2791 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2793 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2794 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2795 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2798 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2799 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
2800 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
2801 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2803 mlx4_ib_alloc_eqs(dev, ibdev);
2805 spin_lock_init(&iboe->lock);
2807 if (init_node_data(ibdev))
2809 mlx4_init_sl2vl_tbl(ibdev);
2811 for (i = 0; i < ibdev->num_ports; ++i) {
2812 mutex_init(&ibdev->counters_table[i].mutex);
2813 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2816 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2817 for (i = 0; i < num_req_counters; ++i) {
2818 mutex_init(&ibdev->qp1_proxy_lock[i]);
2820 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2821 IB_LINK_LAYER_ETHERNET) {
2822 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2823 MLX4_RES_USAGE_DRIVER);
2824 /* if failed to allocate a new counter, use default */
2827 mlx4_get_default_counter_index(dev,
2831 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2832 counter_index = mlx4_get_default_counter_index(dev,
2835 new_counter_index = kmalloc(sizeof(*new_counter_index),
2837 if (!new_counter_index) {
2839 mlx4_counter_free(ibdev->dev, counter_index);
2842 new_counter_index->index = counter_index;
2843 new_counter_index->allocated = allocated;
2844 list_add_tail(&new_counter_index->list,
2845 &ibdev->counters_table[i].counters_list);
2846 ibdev->counters_table[i].default_counter = counter_index;
2847 pr_info("counter index %d for port %d allocated %d\n",
2848 counter_index, i + 1, allocated);
2850 if (mlx4_is_bonded(dev))
2851 for (i = 1; i < ibdev->num_ports ; ++i) {
2853 kmalloc(sizeof(struct counter_index),
2855 if (!new_counter_index)
2857 new_counter_index->index = counter_index;
2858 new_counter_index->allocated = 0;
2859 list_add_tail(&new_counter_index->list,
2860 &ibdev->counters_table[i].counters_list);
2861 ibdev->counters_table[i].default_counter =
2865 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2868 spin_lock_init(&ibdev->sm_lock);
2869 mutex_init(&ibdev->cap_mask_mutex);
2870 INIT_LIST_HEAD(&ibdev->qp_list);
2871 spin_lock_init(&ibdev->reset_flow_resource_lock);
2873 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2875 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2876 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2877 MLX4_IB_UC_STEER_QPN_ALIGN,
2878 &ibdev->steer_qpn_base, 0,
2879 MLX4_RES_USAGE_DRIVER);
2883 ibdev->ib_uc_qpns_bitmap =
2884 kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2887 if (!ibdev->ib_uc_qpns_bitmap)
2888 goto err_steer_qp_release;
2890 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2891 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2892 ibdev->steer_qpn_count);
2893 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2894 dev, ibdev->steer_qpn_base,
2895 ibdev->steer_qpn_base +
2896 ibdev->steer_qpn_count - 1);
2898 goto err_steer_free_bitmap;
2900 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2901 ibdev->steer_qpn_count);
2905 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2906 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2908 if (mlx4_ib_alloc_diag_counters(ibdev))
2909 goto err_steer_free_bitmap;
2911 ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
2912 if (ib_register_device(&ibdev->ib_dev, NULL))
2913 goto err_diag_counters;
2915 if (mlx4_ib_mad_init(ibdev))
2918 if (mlx4_ib_init_sriov(ibdev))
2921 if (!iboe->nb.notifier_call) {
2922 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2923 err = register_netdevice_notifier(&iboe->nb);
2925 iboe->nb.notifier_call = NULL;
2929 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2930 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2935 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2936 if (device_create_file(&ibdev->ib_dev.dev,
2937 mlx4_class_attributes[j]))
2941 ibdev->ib_active = true;
2942 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2943 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2946 if (mlx4_is_mfunc(ibdev->dev))
2949 /* create paravirt contexts for any VFs which are active */
2950 if (mlx4_is_master(ibdev->dev)) {
2951 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2952 if (j == mlx4_master_func_num(ibdev->dev))
2954 if (mlx4_is_slave_active(ibdev->dev, j))
2955 do_slave_init(ibdev, j, 1);
2961 if (ibdev->iboe.nb.notifier_call) {
2962 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2963 pr_warn("failure unregistering notifier\n");
2964 ibdev->iboe.nb.notifier_call = NULL;
2966 flush_workqueue(wq);
2968 mlx4_ib_close_sriov(ibdev);
2971 mlx4_ib_mad_cleanup(ibdev);
2974 ib_unregister_device(&ibdev->ib_dev);
2977 mlx4_ib_diag_cleanup(ibdev);
2979 err_steer_free_bitmap:
2980 kfree(ibdev->ib_uc_qpns_bitmap);
2982 err_steer_qp_release:
2983 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2984 ibdev->steer_qpn_count);
2986 for (i = 0; i < ibdev->num_ports; ++i)
2987 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2990 mlx4_ib_free_eqs(dev, ibdev);
2991 iounmap(ibdev->uar_map);
2994 mlx4_uar_free(dev, &ibdev->priv_uar);
2997 mlx4_pd_free(dev, ibdev->priv_pdn);
3000 ib_dealloc_device(&ibdev->ib_dev);
3002 return NULL;
3003 }
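/*
 * The steer_qp_* helpers manage the QPN range reserved above for device-
 * managed flow steering: allocation hands out aligned blocks from
 * ib_uc_qpns_bitmap, and registration attaches an empty IB L2 rule so
 * traffic destined to those QPNs is steered to the QP.
 */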
3005 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
3006 {
3007 int offset;
3009 WARN_ON(!dev->ib_uc_qpns_bitmap);
3011 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
3012 dev->steer_qpn_count,
3013 get_count_order(count));
3014 if (offset < 0)
3015 return offset;
3017 *qpn = dev->steer_qpn_base + offset;
3018 return 0;
3019 }
3021 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
3022 {
3023 if (!qpn ||
3024 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
3025 return;
3027 if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
3028 qpn, dev->steer_qpn_base))
3029 /* not supposed to be here */
3030 return;
3032 bitmap_release_region(dev->ib_uc_qpns_bitmap,
3033 qpn - dev->steer_qpn_base,
3034 get_count_order(count));
3035 }
3037 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
3042 struct ib_flow_attr *flow = NULL;
3043 struct ib_flow_spec_ib *ib_spec;
3046 flow_size = sizeof(struct ib_flow_attr) +
3047 sizeof(struct ib_flow_spec_ib);
3048 flow = kzalloc(flow_size, GFP_KERNEL);
3051 flow->port = mqp->port;
3052 flow->num_of_specs = 1;
3053 flow->size = flow_size;
3054 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
3055 ib_spec->type = IB_FLOW_SPEC_IB;
3056 ib_spec->size = sizeof(struct ib_flow_spec_ib);
3057 /* Add an empty rule for IB L2 */
3058 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
3060 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
3065 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
3071 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
3072 {
3073 struct mlx4_ib_dev *ibdev = ibdev_ptr;
3074 int p;
3075 int i;
3077 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3078 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
3079 ibdev->ib_active = false;
3080 flush_workqueue(wq);
3082 if (ibdev->iboe.nb.notifier_call) {
3083 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
3084 pr_warn("failure unregistering notifier\n");
3085 ibdev->iboe.nb.notifier_call = NULL;
3086 }
3088 mlx4_ib_close_sriov(ibdev);
3089 mlx4_ib_mad_cleanup(ibdev);
3090 ib_unregister_device(&ibdev->ib_dev);
3091 mlx4_ib_diag_cleanup(ibdev);
3092 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3093 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
3094 ibdev->steer_qpn_count);
3095 kfree(ibdev->ib_uc_qpns_bitmap);
3096 }
3097 iounmap(ibdev->uar_map);
3098 for (p = 0; p < ibdev->num_ports; ++p)
3099 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
3101 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
3102 mlx4_CLOSE_PORT(dev, p);
3104 mlx4_ib_free_eqs(dev, ibdev);
3106 mlx4_uar_free(dev, &ibdev->priv_uar);
3107 mlx4_pd_free(dev, ibdev->priv_pdn);
3108 ib_dealloc_device(&ibdev->ib_dev);
3109 }
3111 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
3113 struct mlx4_ib_demux_work **dm = NULL;
3114 struct mlx4_dev *dev = ibdev->dev;
3116 unsigned long flags;
3117 struct mlx4_active_ports actv_ports;
3119 unsigned int first_port;
3121 if (!mlx4_is_master(dev))
3124 actv_ports = mlx4_get_active_ports(dev, slave);
3125 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3126 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3128 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3132 for (i = 0; i < ports; i++) {
3133 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
3139 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3140 dm[i]->port = first_port + i + 1;
3141 dm[i]->slave = slave;
3142 dm[i]->do_init = do_init;
3145 /* initialize or tear down tunnel QPs for the slave */
3146 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3147 if (!ibdev->sriov.is_going_down) {
3148 for (i = 0; i < ports; i++)
3149 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3150 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3152 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3153 for (i = 0; i < ports; i++)
3161 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3163 struct mlx4_ib_qp *mqp;
3164 unsigned long flags_qp;
3165 unsigned long flags_cq;
3166 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3167 struct list_head cq_notify_list;
3168 struct mlx4_cq *mcq;
3169 unsigned long flags;
3171 pr_warn("mlx4_ib_handle_catas_error was started\n");
3172 INIT_LIST_HEAD(&cq_notify_list);
3174 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
3175 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3177 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3178 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3179 if (mqp->sq.tail != mqp->sq.head) {
3180 send_mcq = to_mcq(mqp->ibqp.send_cq);
3181 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3182 if (send_mcq->mcq.comp &&
3183 mqp->ibqp.send_cq->comp_handler) {
3184 if (!send_mcq->mcq.reset_notify_added) {
3185 send_mcq->mcq.reset_notify_added = 1;
3186 list_add_tail(&send_mcq->mcq.reset_notify,
3190 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3192 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3193 /* Now, handle the QP's receive queue */
3194 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3195 /* no handling is needed for SRQ */
3196 if (!mqp->ibqp.srq) {
3197 if (mqp->rq.tail != mqp->rq.head) {
3198 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3199 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3200 if (recv_mcq->mcq.comp &&
3201 mqp->ibqp.recv_cq->comp_handler) {
3202 if (!recv_mcq->mcq.reset_notify_added) {
3203 recv_mcq->mcq.reset_notify_added = 1;
3204 list_add_tail(&recv_mcq->mcq.reset_notify,
3208 spin_unlock_irqrestore(&recv_mcq->lock,
3212 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3215 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3218 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3219 pr_warn("mlx4_ib_handle_catas_error ended\n");
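/*
 * With the two ports bonded the IB device exposes a single port, so report
 * it ACTIVE if any underlying netdev is running with carrier on, and ERROR
 * otherwise.
 */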
3222 static void handle_bonded_port_state_event(struct work_struct *work)
3224 struct ib_event_work *ew =
3225 container_of(work, struct ib_event_work, work);
3226 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3227 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3229 struct ib_event ibev;
3232 spin_lock_bh(&ibdev->iboe.lock);
3233 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3234 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3235 enum ib_port_state curr_port_state;
3241 (netif_running(curr_netdev) &&
3242 netif_carrier_ok(curr_netdev)) ?
3243 IB_PORT_ACTIVE : IB_PORT_DOWN;
3245 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3246 curr_port_state : IB_PORT_ACTIVE;
3248 spin_unlock_bh(&ibdev->iboe.lock);
3250 ibev.device = &ibdev->ib_dev;
3251 ibev.element.port_num = 1;
3252 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3253 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3255 ib_dispatch_event(&ibev);
3258 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3263 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3265 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3269 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3272 static void ib_sl2vl_update_work(struct work_struct *work)
3274 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3275 struct mlx4_ib_dev *mdev = ew->ib_dev;
3276 int port = ew->port;
3278 mlx4_ib_sl2vl_update(mdev, port);
3283 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3286 struct ib_event_work *ew;
3288 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3290 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3293 queue_work(wq, &ew->work);
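/*
 * Core event dispatcher: translate mlx4 device events (port up/down,
 * catastrophic error, port management change, slave init/shutdown) into IB
 * events and SR-IOV bookkeeping. Bonded devices fold port events into the
 * single exposed port.
 */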
3297 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3298 enum mlx4_dev_event event, unsigned long param)
3300 struct ib_event ibev;
3301 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3302 struct mlx4_eqe *eqe = NULL;
3303 struct ib_event_work *ew;
3306 if (mlx4_is_bonded(dev) &&
3307 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3308 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3309 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3312 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3314 queue_work(wq, &ew->work);
3318 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3319 eqe = (struct mlx4_eqe *)param;
3324 case MLX4_DEV_EVENT_PORT_UP:
3325 if (p > ibdev->num_ports)
3327 if (!mlx4_is_slave(dev) &&
3328 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3329 IB_LINK_LAYER_INFINIBAND) {
3330 if (mlx4_is_master(dev))
3331 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3332 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3333 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3334 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3336 ibev.event = IB_EVENT_PORT_ACTIVE;
3339 case MLX4_DEV_EVENT_PORT_DOWN:
3340 if (p > ibdev->num_ports)
3342 ibev.event = IB_EVENT_PORT_ERR;
3345 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3346 ibdev->ib_active = false;
3347 ibev.event = IB_EVENT_DEVICE_FATAL;
3348 mlx4_ib_handle_catas_error(ibdev);
3351 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3352 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
3356 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3357 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3359 /* need to queue only for port owner, which uses GEN_EQE */
3360 if (mlx4_is_master(dev))
3361 queue_work(wq, &ew->work);
3363 handle_port_mgmt_change_event(&ew->work);
3366 case MLX4_DEV_EVENT_SLAVE_INIT:
3367 /* here, p is the slave id */
3368 do_slave_init(ibdev, p, 1);
3369 if (mlx4_is_master(dev)) {
3372 for (i = 1; i <= ibdev->num_ports; i++) {
3373 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3374 == IB_LINK_LAYER_INFINIBAND)
3375 mlx4_ib_slave_alias_guid_event(ibdev,
3382 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3383 if (mlx4_is_master(dev)) {
3386 for (i = 1; i <= ibdev->num_ports; i++) {
3387 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3388 == IB_LINK_LAYER_INFINIBAND)
3389 mlx4_ib_slave_alias_guid_event(ibdev,
3394 /* here, p is the slave id */
3395 do_slave_init(ibdev, p, 0);
3402 ibev.device = ibdev_ptr;
3403 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3405 ib_dispatch_event(&ibev);
3408 static struct mlx4_interface mlx4_ib_interface = {
3409 .add = mlx4_ib_add,
3410 .remove = mlx4_ib_remove,
3411 .event = mlx4_ib_event,
3412 .protocol = MLX4_PROT_IB_IPV6,
3413 .flags = MLX4_INTFF_BONDING
3414 };
3416 static int __init mlx4_ib_init(void)
3417 {
3418 int err;
3420 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3421 if (!wq)
3422 return -ENOMEM;
3424 err = mlx4_ib_mcg_init();
3425 if (err)
3426 goto clean_wq;
3428 err = mlx4_register_interface(&mlx4_ib_interface);
3429 if (err)
3430 goto clean_mcg;
3432 return 0;
3434 clean_mcg:
3435 mlx4_ib_mcg_destroy();
3437 clean_wq:
3438 destroy_workqueue(wq);
3439 return err;
3440 }
3442 static void __exit mlx4_ib_cleanup(void)
3443 {
3444 mlx4_unregister_interface(&mlx4_ib_interface);
3445 mlx4_ib_mcg_destroy();
3446 destroy_workqueue(wq);
3447 }
3449 module_init(mlx4_ib_init);
3450 module_exit(mlx4_ib_cleanup);