GNU Linux-libre 4.19.286-gnu1
[releases.git] / drivers / net / ethernet / mellanox / mlx5 / core / qp.c
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/gfp.h>
34 #include <linux/export.h>
35 #include <linux/mlx5/cmd.h>
36 #include <linux/mlx5/qp.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/mlx5/transobj.h>
39
40 #include "mlx5_core.h"
41
42 static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
43                                                  u32 rsn)
44 {
45         struct mlx5_qp_table *table = &dev->priv.qp_table;
46         struct mlx5_core_rsc_common *common;
47         unsigned long flags;
48
49         spin_lock_irqsave(&table->lock, flags);
50
51         common = radix_tree_lookup(&table->tree, rsn);
52         if (common)
53                 atomic_inc(&common->refcount);
54
55         spin_unlock_irqrestore(&table->lock, flags);
56
57         if (!common) {
58                 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
59                                rsn);
60                 return NULL;
61         }
62         return common;
63 }
64
65 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
66 {
67         if (atomic_dec_and_test(&common->refcount))
68                 complete(&common->free);
69 }
70
71 static u64 qp_allowed_event_types(void)
72 {
73         u64 mask;
74
75         mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
76                BIT(MLX5_EVENT_TYPE_COMM_EST) |
77                BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
78                BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
79                BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
80                BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
81                BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
82                BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
83
84         return mask;
85 }
86
87 static u64 rq_allowed_event_types(void)
88 {
89         u64 mask;
90
91         mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
92                BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
93
94         return mask;
95 }
96
97 static u64 sq_allowed_event_types(void)
98 {
99         return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
100 }
101
102 static u64 dct_allowed_event_types(void)
103 {
104         return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
105 }
106
107 static bool is_event_type_allowed(int rsc_type, int event_type)
108 {
109         switch (rsc_type) {
110         case MLX5_EVENT_QUEUE_TYPE_QP:
111                 return BIT(event_type) & qp_allowed_event_types();
112         case MLX5_EVENT_QUEUE_TYPE_RQ:
113                 return BIT(event_type) & rq_allowed_event_types();
114         case MLX5_EVENT_QUEUE_TYPE_SQ:
115                 return BIT(event_type) & sq_allowed_event_types();
116         case MLX5_EVENT_QUEUE_TYPE_DCT:
117                 return BIT(event_type) & dct_allowed_event_types();
118         default:
119                 WARN(1, "Event arrived for unknown resource type");
120                 return false;
121         }
122 }
123
124 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
125 {
126         struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
127         struct mlx5_core_dct *dct;
128         struct mlx5_core_qp *qp;
129
130         if (!common)
131                 return;
132
133         if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
134                 mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
135                                event_type, rsn);
136                 goto out;
137         }
138
139         switch (common->res) {
140         case MLX5_RES_QP:
141         case MLX5_RES_RQ:
142         case MLX5_RES_SQ:
143                 qp = (struct mlx5_core_qp *)common;
144                 qp->event(qp, event_type);
145                 break;
146         case MLX5_RES_DCT:
147                 dct = (struct mlx5_core_dct *)common;
148                 if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
149                         complete(&dct->drained);
150                 break;
151         default:
152                 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
153         }
154 out:
155         mlx5_core_put_rsc(common);
156 }
157
158 static int create_resource_common(struct mlx5_core_dev *dev,
159                                   struct mlx5_core_qp *qp,
160                                   int rsc_type)
161 {
162         struct mlx5_qp_table *table = &dev->priv.qp_table;
163         int err;
164
165         qp->common.res = rsc_type;
166         spin_lock_irq(&table->lock);
167         err = radix_tree_insert(&table->tree,
168                                 qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
169                                 qp);
170         spin_unlock_irq(&table->lock);
171         if (err)
172                 return err;
173
174         atomic_set(&qp->common.refcount, 1);
175         init_completion(&qp->common.free);
176         qp->pid = current->pid;
177
178         return 0;
179 }
180
/* Unregister @qp from the resource radix tree and wait until all event
 * handlers that still hold a reference (taken via mlx5_get_rsc()) have
 * dropped it.  The order is critical: remove from the tree first so no
 * new references can be taken, drop the creation reference, then block
 * until the last mlx5_core_put_rsc() completes &qp->common.free.
 */
static void destroy_resource_common(struct mlx5_core_dev *dev,
                                    struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        /* Drop the reference held since create_resource_common(). */
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        /* Wait for in-flight event dispatchers to release theirs. */
        wait_for_completion(&qp->common.free);
}
194
195 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
196                          struct mlx5_core_dct *dct,
197                          u32 *in, int inlen)
198 {
199         u32 out[MLX5_ST_SZ_DW(create_dct_out)]   = {0};
200         u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
201         u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
202         struct mlx5_core_qp *qp = &dct->mqp;
203         int err;
204
205         init_completion(&dct->drained);
206         MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
207
208         err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
209         if (err) {
210                 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
211                 return err;
212         }
213
214         qp->qpn = MLX5_GET(create_dct_out, out, dctn);
215         err = create_resource_common(dev, qp, MLX5_RES_DCT);
216         if (err)
217                 goto err_cmd;
218
219         return 0;
220 err_cmd:
221         MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
222         MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
223         mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
224                       (void *)&out, sizeof(dout));
225         return err;
226 }
227 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
228
229 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
230                         struct mlx5_core_qp *qp,
231                         u32 *in, int inlen)
232 {
233         u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
234         u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
235         u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
236         int err;
237
238         MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
239
240         err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
241         if (err)
242                 return err;
243
244         qp->qpn = MLX5_GET(create_qp_out, out, qpn);
245         mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
246
247         err = create_resource_common(dev, qp, MLX5_RES_QP);
248         if (err)
249                 goto err_cmd;
250
251         err = mlx5_debug_qp_add(dev, qp);
252         if (err)
253                 mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
254                               qp->qpn);
255
256         atomic_inc(&dev->num_qps);
257
258         return 0;
259
260 err_cmd:
261         memset(din, 0, sizeof(din));
262         memset(dout, 0, sizeof(dout));
263         MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
264         MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
265         mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
266         return err;
267 }
268 EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
269
270 static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
271                                struct mlx5_core_dct *dct)
272 {
273         u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
274         u32 in[MLX5_ST_SZ_DW(drain_dct_in)]   = {0};
275         struct mlx5_core_qp *qp = &dct->mqp;
276
277         MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
278         MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
279         return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
280                              (void *)&out, sizeof(out));
281 }
282
283 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
284                           struct mlx5_core_dct *dct)
285 {
286         u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
287         u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
288         struct mlx5_core_qp *qp = &dct->mqp;
289         int err;
290
291         err = mlx5_core_drain_dct(dev, dct);
292         if (err) {
293                 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
294                         goto destroy;
295                 } else {
296                         mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
297                         return err;
298                 }
299         }
300         wait_for_completion(&dct->drained);
301 destroy:
302         destroy_resource_common(dev, &dct->mqp);
303         MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
304         MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
305         err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
306                             (void *)&out, sizeof(out));
307         return err;
308 }
309 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
310
311 int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
312                          struct mlx5_core_qp *qp)
313 {
314         u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
315         u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
316         int err;
317
318         mlx5_debug_qp_remove(dev, qp);
319
320         destroy_resource_common(dev, qp);
321
322         MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
323         MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
324         err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
325         if (err)
326                 return err;
327
328         atomic_dec(&dev->num_qps);
329         return 0;
330 }
331 EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
332
333 int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
334                              u32 timeout_usec)
335 {
336         u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
337         u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)]   = {0};
338
339         MLX5_SET(set_delay_drop_params_in, in, opcode,
340                  MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
341         MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
342                  timeout_usec / 100);
343         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
344 }
345 EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
346
/* Pair of heap-allocated command mailboxes used by the modify-QP path;
 * allocated by mbox_alloc()/modify_qp_mbox_alloc() and released with
 * mbox_free().
 */
struct mbox_info {
        u32 *in;     /* command input mailbox (inlen bytes) */
        u32 *out;    /* command output mailbox (outlen bytes) */
        int inlen;
        int outlen;
};
353
354 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
355 {
356         mbox->inlen  = inlen;
357         mbox->outlen = outlen;
358         mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
359         mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
360         if (!mbox->in || !mbox->out) {
361                 kfree(mbox->in);
362                 kfree(mbox->out);
363                 return -ENOMEM;
364         }
365
366         return 0;
367 }
368
/* Release the mailboxes allocated by mbox_alloc(); safe to call with
 * NULL pointers (kfree(NULL) is a no-op).
 */
static void mbox_free(struct mbox_info *mbox)
{
        kfree(mbox->in);
        kfree(mbox->out);
}
374
/* Allocate and populate the command mailboxes in @mbox for the QP state
 * transition selected by @opcode.  Simple transitions (2RST/2ERR) carry
 * only the opcode and QPN; the remaining transitions additionally copy
 * the caller's QP context (@qpc) and optional-parameter mask into the
 * input mailbox.  Returns 0, -ENOMEM on allocation failure, or -EINVAL
 * for an unknown opcode.  On success the caller owns the mailboxes and
 * must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                                u32 opt_param_mask, void *qpc,
                                struct mbox_info *mbox)
{
        /* Clear the pointers so mbox_free() is safe on every error path. */
        mbox->out = NULL;
        mbox->in = NULL;

/* Allocate in/out mailboxes sized for command type `typ'. */
#define MBOX_ALLOC(mbox, typ)  \
        mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Fill the common opcode/qpn header of a modify-QP input mailbox. */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
        MLX5_SET(typ##_in, in, opcode, _opcode); \
        MLX5_SET(typ##_in, in, qpn, _qpn)

/* As MOD_QP_IN_SET, plus the opt_param_mask and a copy of the QPC. */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
        MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
        MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
        memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

        switch (opcode) {
        /* 2RST & 2ERR */
        case MLX5_CMD_OP_2RST_QP:
                if (MBOX_ALLOC(mbox, qp_2rst))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
                break;
        case MLX5_CMD_OP_2ERR_QP:
                if (MBOX_ALLOC(mbox, qp_2err))
                        return -ENOMEM;
                MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
                break;

        /* MODIFY with QPC */
        case MLX5_CMD_OP_RST2INIT_QP:
                if (MBOX_ALLOC(mbox, rst2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_RTR2RTS_QP:
                if (MBOX_ALLOC(mbox, rtr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_RTS2RTS_QP:
                if (MBOX_ALLOC(mbox, rts2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_SQERR2RTS_QP:
                if (MBOX_ALLOC(mbox, sqerr2rts_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        case MLX5_CMD_OP_INIT2INIT_QP:
                if (MBOX_ALLOC(mbox, init2init_qp))
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc);
                break;
        default:
                mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
                              opcode, qpn);
                return -EINVAL;
        }
        return 0;
}
451
452 int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
453                         u32 opt_param_mask, void *qpc,
454                         struct mlx5_core_qp *qp)
455 {
456         struct mbox_info mbox;
457         int err;
458
459         err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
460                                    opt_param_mask, qpc, &mbox);
461         if (err)
462                 return err;
463
464         err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
465         mbox_free(&mbox);
466         return err;
467 }
468 EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
469
/* Initialize the per-device QP/resource table (lock + radix tree used
 * for async event routing) and register the QP debugfs entries.
 * GFP_ATOMIC: insertions happen under a spinlock with IRQs disabled.
 */
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}
479
/* Tear down what mlx5_init_qp_table() set up; by this point all tracked
 * resources are expected to have been destroyed, so only the debugfs
 * entries need removing.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}
484
485 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
486                        u32 *out, int outlen)
487 {
488         u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
489
490         MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
491         MLX5_SET(query_qp_in, in, qpn, qp->qpn);
492         return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
493 }
494 EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
495
496 int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
497                         u32 *out, int outlen)
498 {
499         u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
500         struct mlx5_core_qp *qp = &dct->mqp;
501
502         MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
503         MLX5_SET(query_dct_in, in, dctn, qp->qpn);
504
505         return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
506                              (void *)out, outlen);
507 }
508 EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
509
510 int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
511 {
512         u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
513         u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
514         int err;
515
516         MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
517         err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
518         if (!err)
519                 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
520         return err;
521 }
522 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
523
524 int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
525 {
526         u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
527         u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};
528
529         MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
530         MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
531         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
532 }
533 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
534
535 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
536                                 struct mlx5_core_qp *rq)
537 {
538         int err;
539         u32 rqn;
540
541         err = mlx5_core_create_rq(dev, in, inlen, &rqn);
542         if (err)
543                 return err;
544
545         rq->qpn = rqn;
546         err = create_resource_common(dev, rq, MLX5_RES_RQ);
547         if (err)
548                 goto err_destroy_rq;
549
550         return 0;
551
552 err_destroy_rq:
553         mlx5_core_destroy_rq(dev, rq->qpn);
554
555         return err;
556 }
557 EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
558
/* Destroy a tracked RQ: unregister it from the resource table first
 * (waiting out any in-flight event handlers), then destroy it in
 * firmware.
 */
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_resource_common(dev, rq);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
566
567 int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
568                                 struct mlx5_core_qp *sq)
569 {
570         int err;
571         u32 sqn;
572
573         err = mlx5_core_create_sq(dev, in, inlen, &sqn);
574         if (err)
575                 return err;
576
577         sq->qpn = sqn;
578         err = create_resource_common(dev, sq, MLX5_RES_SQ);
579         if (err)
580                 goto err_destroy_sq;
581
582         return 0;
583
584 err_destroy_sq:
585         mlx5_core_destroy_sq(dev, sq->qpn);
586
587         return err;
588 }
589 EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
590
/* Destroy a tracked SQ: unregister it from the resource table first
 * (waiting out any in-flight event handlers), then destroy it in
 * firmware.
 */
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_resource_common(dev, sq);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
598
599 int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
600 {
601         u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
602         u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
603         int err;
604
605         MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
606         err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
607         if (!err)
608                 *counter_id = MLX5_GET(alloc_q_counter_out, out,
609                                        counter_set_id);
610         return err;
611 }
612 EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
613
614 int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
615 {
616         u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};
617         u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
618
619         MLX5_SET(dealloc_q_counter_in, in, opcode,
620                  MLX5_CMD_OP_DEALLOC_Q_COUNTER);
621         MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
622         return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
623 }
624 EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
625
626 int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
627                               int reset, void *out, int out_size)
628 {
629         u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
630
631         MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
632         MLX5_SET(query_q_counter_in, in, clear, reset);
633         MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
634         return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
635 }
636 EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);