/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
40 #include "mlx5_core.h"
42 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
45 struct mlx5_qp_table *table = &dev->priv.qp_table;
46 struct mlx5_core_rsc_common *common;
49 spin_lock_irqsave(&table->lock, flags);
51 common = radix_tree_lookup(&table->tree, rsn);
53 atomic_inc(&common->refcount);
55 spin_unlock_irqrestore(&table->lock, flags);
58 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
65 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
67 if (atomic_dec_and_test(&common->refcount))
68 complete(&common->free);
71 static u64 qp_allowed_event_types(void)
75 mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
76 BIT(MLX5_EVENT_TYPE_COMM_EST) |
77 BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
78 BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
79 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
80 BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
81 BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
82 BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
87 static u64 rq_allowed_event_types(void)
91 mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
92 BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
97 static u64 sq_allowed_event_types(void)
99 return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
102 static u64 dct_allowed_event_types(void)
104 return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
107 static bool is_event_type_allowed(int rsc_type, int event_type)
110 case MLX5_EVENT_QUEUE_TYPE_QP:
111 return BIT(event_type) & qp_allowed_event_types();
112 case MLX5_EVENT_QUEUE_TYPE_RQ:
113 return BIT(event_type) & rq_allowed_event_types();
114 case MLX5_EVENT_QUEUE_TYPE_SQ:
115 return BIT(event_type) & sq_allowed_event_types();
116 case MLX5_EVENT_QUEUE_TYPE_DCT:
117 return BIT(event_type) & dct_allowed_event_types();
119 WARN(1, "Event arrived for unknown resource type");
124 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
126 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
127 struct mlx5_core_dct *dct;
128 struct mlx5_core_qp *qp;
133 if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
134 mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
139 switch (common->res) {
143 qp = (struct mlx5_core_qp *)common;
144 qp->event(qp, event_type);
147 dct = (struct mlx5_core_dct *)common;
148 if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
149 complete(&dct->drained);
152 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
155 mlx5_core_put_rsc(common);
158 static int create_resource_common(struct mlx5_core_dev *dev,
159 struct mlx5_core_qp *qp,
162 struct mlx5_qp_table *table = &dev->priv.qp_table;
165 qp->common.res = rsc_type;
166 spin_lock_irq(&table->lock);
167 err = radix_tree_insert(&table->tree,
168 qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
170 spin_unlock_irq(&table->lock);
174 atomic_set(&qp->common.refcount, 1);
175 init_completion(&qp->common.free);
176 qp->pid = current->pid;
181 static void destroy_resource_common(struct mlx5_core_dev *dev,
182 struct mlx5_core_qp *qp)
184 struct mlx5_qp_table *table = &dev->priv.qp_table;
187 spin_lock_irqsave(&table->lock, flags);
188 radix_tree_delete(&table->tree,
189 qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
190 spin_unlock_irqrestore(&table->lock, flags);
191 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
192 wait_for_completion(&qp->common.free);
195 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
196 struct mlx5_core_dct *dct,
199 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
200 u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
201 u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
202 struct mlx5_core_qp *qp = &dct->mqp;
205 init_completion(&dct->drained);
206 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
208 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
210 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
214 qp->qpn = MLX5_GET(create_dct_out, out, dctn);
215 err = create_resource_common(dev, qp, MLX5_RES_DCT);
221 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
222 MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
223 mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
224 (void *)&out, sizeof(dout));
227 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
229 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
230 struct mlx5_core_qp *qp,
233 u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
234 u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
235 u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
238 MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
240 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
244 qp->qpn = MLX5_GET(create_qp_out, out, qpn);
245 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
247 err = create_resource_common(dev, qp, MLX5_RES_QP);
251 err = mlx5_debug_qp_add(dev, qp);
253 mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
256 atomic_inc(&dev->num_qps);
261 memset(din, 0, sizeof(din));
262 memset(dout, 0, sizeof(dout));
263 MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
264 MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
265 mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
268 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
270 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
271 struct mlx5_core_dct *dct)
273 u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
274 u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
275 struct mlx5_core_qp *qp = &dct->mqp;
277 MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
278 MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
279 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
280 (void *)&out, sizeof(out));
283 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
284 struct mlx5_core_dct *dct)
286 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
287 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
288 struct mlx5_core_qp *qp = &dct->mqp;
291 err = mlx5_core_drain_dct(dev, dct);
293 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
296 mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
300 wait_for_completion(&dct->drained);
302 destroy_resource_common(dev, &dct->mqp);
303 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
304 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
305 err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
306 (void *)&out, sizeof(out));
309 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
311 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
312 struct mlx5_core_qp *qp)
314 u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
315 u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
318 mlx5_debug_qp_remove(dev, qp);
320 destroy_resource_common(dev, qp);
322 MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
323 MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
324 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
328 atomic_dec(&dev->num_qps);
331 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
333 int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
336 u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
337 u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
339 MLX5_SET(set_delay_drop_params_in, in, opcode,
340 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
341 MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
343 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
345 EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
354 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
357 mbox->outlen = outlen;
358 mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
359 mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
360 if (!mbox->in || !mbox->out) {
369 static void mbox_free(struct mbox_info *mbox)
375 static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
376 u32 opt_param_mask, void *qpc,
377 struct mbox_info *mbox)
382 #define MBOX_ALLOC(mbox, typ) \
383 mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
385 #define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
386 MLX5_SET(typ##_in, in, opcode, _opcode); \
387 MLX5_SET(typ##_in, in, qpn, _qpn)
389 #define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
390 MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
391 MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
392 memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
396 case MLX5_CMD_OP_2RST_QP:
397 if (MBOX_ALLOC(mbox, qp_2rst))
399 MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
401 case MLX5_CMD_OP_2ERR_QP:
402 if (MBOX_ALLOC(mbox, qp_2err))
404 MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
407 /* MODIFY with QPC */
408 case MLX5_CMD_OP_RST2INIT_QP:
409 if (MBOX_ALLOC(mbox, rst2init_qp))
411 MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
412 opt_param_mask, qpc);
414 case MLX5_CMD_OP_INIT2RTR_QP:
415 if (MBOX_ALLOC(mbox, init2rtr_qp))
417 MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
418 opt_param_mask, qpc);
420 case MLX5_CMD_OP_RTR2RTS_QP:
421 if (MBOX_ALLOC(mbox, rtr2rts_qp))
423 MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
424 opt_param_mask, qpc);
426 case MLX5_CMD_OP_RTS2RTS_QP:
427 if (MBOX_ALLOC(mbox, rts2rts_qp))
429 MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
430 opt_param_mask, qpc);
432 case MLX5_CMD_OP_SQERR2RTS_QP:
433 if (MBOX_ALLOC(mbox, sqerr2rts_qp))
435 MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
436 opt_param_mask, qpc);
438 case MLX5_CMD_OP_INIT2INIT_QP:
439 if (MBOX_ALLOC(mbox, init2init_qp))
441 MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
442 opt_param_mask, qpc);
445 mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
452 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
453 u32 opt_param_mask, void *qpc,
454 struct mlx5_core_qp *qp)
456 struct mbox_info mbox;
459 err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
460 opt_param_mask, qpc, &mbox);
464 err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
468 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
470 void mlx5_init_qp_table(struct mlx5_core_dev *dev)
472 struct mlx5_qp_table *table = &dev->priv.qp_table;
474 memset(table, 0, sizeof(*table));
475 spin_lock_init(&table->lock);
476 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
477 mlx5_qp_debugfs_init(dev);
/* Tear down the QP debugfs hierarchy created by mlx5_init_qp_table(). */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
485 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
486 u32 *out, int outlen)
488 u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
490 MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
491 MLX5_SET(query_qp_in, in, qpn, qp->qpn);
492 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
494 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
496 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
497 u32 *out, int outlen)
499 u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
500 struct mlx5_core_qp *qp = &dct->mqp;
502 MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
503 MLX5_SET(query_dct_in, in, dctn, qp->qpn);
505 return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
506 (void *)out, outlen);
508 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
510 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
512 u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
513 u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
516 MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
517 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
519 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
522 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
524 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
526 u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
527 u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
529 MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
530 MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
531 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
533 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
535 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
536 struct mlx5_core_qp *rq)
541 err = mlx5_core_create_rq(dev, in, inlen, &rqn);
546 err = create_resource_common(dev, rq, MLX5_RES_RQ);
553 mlx5_core_destroy_rq(dev, rq->qpn);
557 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
559 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
560 struct mlx5_core_qp *rq)
562 destroy_resource_common(dev, rq);
563 mlx5_core_destroy_rq(dev, rq->qpn);
565 EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
567 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
568 struct mlx5_core_qp *sq)
573 err = mlx5_core_create_sq(dev, in, inlen, &sqn);
578 err = create_resource_common(dev, sq, MLX5_RES_SQ);
585 mlx5_core_destroy_sq(dev, sq->qpn);
589 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
591 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
592 struct mlx5_core_qp *sq)
594 destroy_resource_common(dev, sq);
595 mlx5_core_destroy_sq(dev, sq->qpn);
597 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
599 int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
601 u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
602 u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
605 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
606 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
608 *counter_id = MLX5_GET(alloc_q_counter_out, out,
612 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
614 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
616 u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
617 u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
619 MLX5_SET(dealloc_q_counter_in, in, opcode,
620 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
621 MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
622 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
624 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
626 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
627 int reset, void *out, int out_size)
629 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
631 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
632 MLX5_SET(query_q_counter_in, in, clear, reset);
633 MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
634 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
636 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);