GNU Linux-libre 4.19.286-gnu1: drivers/infiniband/hw/mlx5/cq.c
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/kref.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_user_verbs.h>
36 #include <rdma/ib_cache.h>
37 #include "mlx5_ib.h"
38
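/*
 * Completion and asynchronous event callbacks registered with
 * mlx5_core: mlx5_ib_cq_comp() forwards a completion interrupt to the
 * consumer's comp_handler, and mlx5_ib_cq_event() translates the only
 * event expected here (CQ error) into an IB_EVENT_CQ_ERR event.
 */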
39 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
40 {
41         struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
42
43         ibcq->comp_handler(ibcq, ibcq->cq_context);
44 }
45
46 static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
47 {
48         struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
49         struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
50         struct ib_cq *ibcq = &cq->ibcq;
51         struct ib_event event;
52
53         if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
54                 mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
55                              type, mcq->cqn);
56                 return;
57         }
58
59         if (ibcq->event_handler) {
60                 event.device     = &dev->ib_dev;
61                 event.event      = IB_EVENT_CQ_ERR;
62                 event.element.cq = ibcq;
63                 ibcq->event_handler(&event, ibcq->cq_context);
64         }
65 }
66
67 static void *get_cqe(struct mlx5_ib_cq *cq, int n)
68 {
69         return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
70 }
71
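/*
 * Software ownership bit for CQE index @n in a ring of @nent entries
 * (@nent is a power of two here): the bit is the "lap" bit of the
 * index, so it flips on every pass around the ring.
 */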
72 static u8 sw_ownership_bit(int n, int nent)
73 {
74         return (n & nent) ? 1 : 0;
75 }
76
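/*
 * Return the CQE at index @n if it is valid and owned by software,
 * otherwise NULL.  With 128-byte CQEs the 64-byte hardware CQE sits in
 * the second half of the entry, hence the "cqe + 64".  A CQE belongs
 * to software when its owner bit matches the lap bit of the consumer
 * index, !!(n & (cq->ibcq.cqe + 1)).
 */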
77 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
78 {
79         void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
80         struct mlx5_cqe64 *cqe64;
81
82         cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
83
84         if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
85             !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
86                 return cqe;
87         } else {
88                 return NULL;
89         }
90 }
91
92 static void *next_cqe_sw(struct mlx5_ib_cq *cq)
93 {
94         return get_sw_cqe(cq, cq->mcq.cons_index);
95 }
96
97 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
98 {
99         switch (wq->wr_data[idx]) {
100         case MLX5_IB_WR_UMR:
101                 return 0;
102
103         case IB_WR_LOCAL_INV:
104                 return IB_WC_LOCAL_INV;
105
106         case IB_WR_REG_MR:
107                 return IB_WC_REG_MR;
108
109         default:
110                 pr_warn("unknown wr_data opcode for UMR completion\n");
111                 return 0;
112         }
113 }
114
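/*
 * Fill in a work completion for a successful requester (send queue)
 * CQE: map the hardware opcode from the top byte of sop_drop_qpn to an
 * ib_wc opcode, set IB_WC_WITH_IMM for the *_IMM variants, set
 * byte_len for RDMA reads and atomics, and use the wr_data stashed at
 * post time to resolve UMR completions.
 */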
115 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
116                             struct mlx5_ib_wq *wq, int idx)
117 {
118         wc->wc_flags = 0;
119         switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
120         case MLX5_OPCODE_RDMA_WRITE_IMM:
121                 wc->wc_flags |= IB_WC_WITH_IMM;
122                 /* fall through */
123         case MLX5_OPCODE_RDMA_WRITE:
124                 wc->opcode    = IB_WC_RDMA_WRITE;
125                 break;
126         case MLX5_OPCODE_SEND_IMM:
127                 wc->wc_flags |= IB_WC_WITH_IMM;
128                 /* fall through */
129         case MLX5_OPCODE_SEND:
130         case MLX5_OPCODE_SEND_INVAL:
131                 wc->opcode    = IB_WC_SEND;
132                 break;
133         case MLX5_OPCODE_RDMA_READ:
134                 wc->opcode    = IB_WC_RDMA_READ;
135                 wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
136                 break;
137         case MLX5_OPCODE_ATOMIC_CS:
138                 wc->opcode    = IB_WC_COMP_SWAP;
139                 wc->byte_len  = 8;
140                 break;
141         case MLX5_OPCODE_ATOMIC_FA:
142                 wc->opcode    = IB_WC_FETCH_ADD;
143                 wc->byte_len  = 8;
144                 break;
145         case MLX5_OPCODE_ATOMIC_MASKED_CS:
146                 wc->opcode    = IB_WC_MASKED_COMP_SWAP;
147                 wc->byte_len  = 8;
148                 break;
149         case MLX5_OPCODE_ATOMIC_MASKED_FA:
150                 wc->opcode    = IB_WC_MASKED_FETCH_ADD;
151                 wc->byte_len  = 8;
152                 break;
153         case MLX5_OPCODE_UMR:
154                 wc->opcode = get_umr_comp(wq, idx);
155                 break;
156         }
157 }
158
159 enum {
160         MLX5_GRH_IN_BUFFER = 1,
161         MLX5_GRH_IN_CQE    = 2,
162 };
163
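/*
 * Fill in a work completion for a responder (receive) CQE: take the
 * WR ID from the SRQ (plain or XRC) or from the receive queue, then
 * set byte count, opcode, immediate/invalidate data, checksum status,
 * source QP and GRH flag.  QP1 looks up the pkey index from the CQE;
 * on RoCE ports the VLAN, SL and network header type are also filled.
 */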
164 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
165                              struct mlx5_ib_qp *qp)
166 {
167         enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
168         struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
169         struct mlx5_ib_srq *srq;
170         struct mlx5_ib_wq *wq;
171         u16 wqe_ctr;
172         u8  roce_packet_type;
173         bool vlan_present;
174         u8 g;
175
176         if (qp->ibqp.srq || qp->ibqp.xrcd) {
177                 struct mlx5_core_srq *msrq = NULL;
178
179                 if (qp->ibqp.xrcd) {
180                         msrq = mlx5_core_get_srq(dev->mdev,
181                                                  be32_to_cpu(cqe->srqn));
182                         srq = to_mibsrq(msrq);
183                 } else {
184                         srq = to_msrq(qp->ibqp.srq);
185                 }
186                 if (srq) {
187                         wqe_ctr = be16_to_cpu(cqe->wqe_counter);
188                         wc->wr_id = srq->wrid[wqe_ctr];
189                         mlx5_ib_free_srq_wqe(srq, wqe_ctr);
190                         if (msrq && atomic_dec_and_test(&msrq->refcount))
191                                 complete(&msrq->free);
192                 }
193         } else {
194                 wq        = &qp->rq;
195                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
196                 ++wq->tail;
197         }
198         wc->byte_len = be32_to_cpu(cqe->byte_cnt);
199
200         switch (cqe->op_own >> 4) {
201         case MLX5_CQE_RESP_WR_IMM:
202                 wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
203                 wc->wc_flags    = IB_WC_WITH_IMM;
204                 wc->ex.imm_data = cqe->imm_inval_pkey;
205                 break;
206         case MLX5_CQE_RESP_SEND:
207                 wc->opcode   = IB_WC_RECV;
208                 wc->wc_flags = IB_WC_IP_CSUM_OK;
209                 if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
210                                (cqe->hds_ip_ext & CQE_L4_OK))))
211                         wc->wc_flags = 0;
212                 break;
213         case MLX5_CQE_RESP_SEND_IMM:
214                 wc->opcode      = IB_WC_RECV;
215                 wc->wc_flags    = IB_WC_WITH_IMM;
216                 wc->ex.imm_data = cqe->imm_inval_pkey;
217                 break;
218         case MLX5_CQE_RESP_SEND_INV:
219                 wc->opcode      = IB_WC_RECV;
220                 wc->wc_flags    = IB_WC_WITH_INVALIDATE;
221                 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
222                 break;
223         }
224         wc->src_qp         = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
225         wc->dlid_path_bits = cqe->ml_path;
226         g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
227         wc->wc_flags |= g ? IB_WC_GRH : 0;
228         if (unlikely(is_qp1(qp->ibqp.qp_type))) {
229                 u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
230
231                 ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
232                                     &wc->pkey_index);
233         } else {
234                 wc->pkey_index = 0;
235         }
236
237         if (ll != IB_LINK_LAYER_ETHERNET) {
238                 wc->slid = be16_to_cpu(cqe->slid);
239                 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
240                 return;
241         }
242
243         wc->slid = 0;
244         vlan_present = cqe->l4_l3_hdr_type & 0x1;
245         roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
246         if (vlan_present) {
247                 wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
248                 wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
249                 wc->wc_flags |= IB_WC_WITH_VLAN;
250         } else {
251                 wc->sl = 0;
252         }
253
254         switch (roce_packet_type) {
255         case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
256                 wc->network_hdr_type = RDMA_NETWORK_IB;
257                 break;
258         case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
259                 wc->network_hdr_type = RDMA_NETWORK_IPV6;
260                 break;
261         case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
262                 wc->network_hdr_type = RDMA_NETWORK_IPV4;
263                 break;
264         }
265         wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
266 }
267
268 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
269 {
270         mlx5_ib_warn(dev, "dump error cqe\n");
271         mlx5_dump_err_cqe(dev->mdev, cqe);
272 }
273
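/*
 * Translate the hardware error syndrome into an ib_wc_status and
 * record the vendor syndrome.  The raw CQE is dumped for unexpected
 * errors, but not for the common flush and retry-exceeded cases.
 */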
274 static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
275                                   struct mlx5_err_cqe *cqe,
276                                   struct ib_wc *wc)
277 {
278         int dump = 1;
279
280         switch (cqe->syndrome) {
281         case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
282                 wc->status = IB_WC_LOC_LEN_ERR;
283                 break;
284         case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
285                 wc->status = IB_WC_LOC_QP_OP_ERR;
286                 break;
287         case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
288                 wc->status = IB_WC_LOC_PROT_ERR;
289                 break;
290         case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
291                 dump = 0;
292                 wc->status = IB_WC_WR_FLUSH_ERR;
293                 break;
294         case MLX5_CQE_SYNDROME_MW_BIND_ERR:
295                 wc->status = IB_WC_MW_BIND_ERR;
296                 break;
297         case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
298                 wc->status = IB_WC_BAD_RESP_ERR;
299                 break;
300         case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
301                 wc->status = IB_WC_LOC_ACCESS_ERR;
302                 break;
303         case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
304                 wc->status = IB_WC_REM_INV_REQ_ERR;
305                 break;
306         case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
307                 wc->status = IB_WC_REM_ACCESS_ERR;
308                 break;
309         case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
310                 wc->status = IB_WC_REM_OP_ERR;
311                 break;
312         case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
313                 wc->status = IB_WC_RETRY_EXC_ERR;
314                 dump = 0;
315                 break;
316         case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
317                 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
318                 dump = 0;
319                 break;
320         case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
321                 wc->status = IB_WC_REM_ABORT_ERR;
322                 break;
323         default:
324                 wc->status = IB_WC_GENERAL_ERR;
325                 break;
326         }
327
328         wc->vendor_err = cqe->vendor_err_synd;
329         if (dump)
330                 dump_cqe(dev, cqe);
331 }
332
333 static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
334 {
335         /* TBD: waiting on a decision; for now no completion is treated
336          * as an atomic response. */
337         return 0;
338 }
339
340 static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
341 {
342         struct mlx5_wqe_data_seg *dpseg;
343         void *addr;
344
345         dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
346                 sizeof(struct mlx5_wqe_raddr_seg) +
347                 sizeof(struct mlx5_wqe_atomic_seg);
348         addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
349         return addr;
350 }
351
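/*
 * Byte-swap the data returned by an atomic operation in place in the
 * send WQE's data segment (one 32-bit word or a series of 64-bit
 * words).  handle_atomics() below walks the send queue w_list from the
 * last polled WQE up to the one reported in the CQE.  This is
 * currently a no-op because is_atomic_response() always returns 0.
 */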
352 static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
353                           uint16_t idx)
354 {
355         void *addr;
356         int byte_count;
357         int i;
358
359         if (!is_atomic_response(qp, idx))
360                 return;
361
362         byte_count = be32_to_cpu(cqe64->byte_cnt);
363         addr = mlx5_get_atomic_laddr(qp, idx);
364
365         if (byte_count == 4) {
366                 *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
367         } else {
368                 for (i = 0; i < byte_count; i += 8) {
369                         *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
370                         addr += 8;
371                 }
372         }
373
374         return;
375 }
376
377 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
378                            u16 tail, u16 head)
379 {
380         u16 idx;
381
382         do {
383                 idx = tail & (qp->sq.wqe_cnt - 1);
384                 handle_atomic(qp, cqe64, idx);
385                 if (idx == head)
386                         break;
387
388                 tail = qp->sq.w_list[idx].next;
389         } while (1);
390         tail = qp->sq.w_list[idx].next;
391         qp->sq.last_poll = tail;
392 }
393
394 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
395 {
396         mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
397 }
398
399 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
400                              struct ib_sig_err *item)
401 {
402         u16 syndrome = be16_to_cpu(cqe->syndrome);
403
404 #define GUARD_ERR   (1 << 13)
405 #define APPTAG_ERR  (1 << 12)
406 #define REFTAG_ERR  (1 << 11)
407
408         if (syndrome & GUARD_ERR) {
409                 item->err_type = IB_SIG_BAD_GUARD;
410                 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
411                 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
412         } else
413         if (syndrome & REFTAG_ERR) {
414                 item->err_type = IB_SIG_BAD_REFTAG;
415                 item->expected = be32_to_cpu(cqe->expected_reftag);
416                 item->actual = be32_to_cpu(cqe->actual_reftag);
417         } else
418         if (syndrome & APPTAG_ERR) {
419                 item->err_type = IB_SIG_BAD_APPTAG;
420                 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
421                 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
422         } else {
423                 pr_err("Got signature completion error with bad syndrome %04x\n",
424                        syndrome);
425         }
426
427         item->sig_err_offset = be64_to_cpu(cqe->err_offset);
428         item->key = be32_to_cpu(cqe->mkey);
429 }
430
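/*
 * sw_send_comp()/sw_recv_comp() generate IB_WC_WR_FLUSH_ERR
 * completions in software for WQEs still outstanding on a QP.  They
 * are used by mlx5_ib_poll_sw_comp() when the device is in internal
 * error state and the hardware can no longer produce CQEs.
 */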
431 static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
432                          struct ib_wc *wc, int *npolled)
433 {
434         struct mlx5_ib_wq *wq;
435         unsigned int cur;
436         unsigned int idx;
437         int np;
438         int i;
439
440         wq = &qp->sq;
441         cur = wq->head - wq->tail;
442         np = *npolled;
443
444         if (cur == 0)
445                 return;
446
447         for (i = 0;  i < cur && np < num_entries; i++) {
448                 idx = wq->last_poll & (wq->wqe_cnt - 1);
449                 wc->wr_id = wq->wrid[idx];
450                 wc->status = IB_WC_WR_FLUSH_ERR;
451                 wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
452                 wq->tail++;
453                 np++;
454                 wc->qp = &qp->ibqp;
455                 wc++;
456                 wq->last_poll = wq->w_list[idx].next;
457         }
458         *npolled = np;
459 }
460
461 static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
462                          struct ib_wc *wc, int *npolled)
463 {
464         struct mlx5_ib_wq *wq;
465         unsigned int cur;
466         int np;
467         int i;
468
469         wq = &qp->rq;
470         cur = wq->head - wq->tail;
471         np = *npolled;
472
473         if (cur == 0)
474                 return;
475
476         for (i = 0;  i < cur && np < num_entries; i++) {
477                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
478                 wc->status = IB_WC_WR_FLUSH_ERR;
479                 wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
480                 wq->tail++;
481                 np++;
482                 wc->qp = &qp->ibqp;
483                 wc++;
484         }
485         *npolled = np;
486 }
487
488 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
489                                  struct ib_wc *wc, int *npolled)
490 {
491         struct mlx5_ib_qp *qp;
492
493         *npolled = 0;
494         /* Find uncompleted WQEs belonging to this CQ and return flush-error completions that mimic them */
495         list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
496                 sw_send_comp(qp, num_entries, wc + *npolled, npolled);
497                 if (*npolled >= num_entries)
498                         return;
499         }
500
501         list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
502                 sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
503                 if (*npolled >= num_entries)
504                         return;
505         }
506 }
507
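/*
 * Poll one hardware CQE.  Returns -EAGAIN when no software-owned CQE
 * is available.  Otherwise the consumer index is advanced, a read
 * barrier orders the CQE reads after the ownership check, the QP is
 * looked up when it differs from *cur_qp, and @wc is filled according
 * to the CQE opcode.  A resize CQE makes the CQ switch to the new
 * buffer and poll again; a signature-error CQE updates the MR's
 * signature state and is likewise consumed without producing a wc.
 */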
508 static int mlx5_poll_one(struct mlx5_ib_cq *cq,
509                          struct mlx5_ib_qp **cur_qp,
510                          struct ib_wc *wc)
511 {
512         struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
513         struct mlx5_err_cqe *err_cqe;
514         struct mlx5_cqe64 *cqe64;
515         struct mlx5_core_qp *mqp;
516         struct mlx5_ib_wq *wq;
517         struct mlx5_sig_err_cqe *sig_err_cqe;
518         struct mlx5_core_mkey *mmkey;
519         struct mlx5_ib_mr *mr;
520         uint8_t opcode;
521         uint32_t qpn;
522         u16 wqe_ctr;
523         void *cqe;
524         int idx;
525
526 repoll:
527         cqe = next_cqe_sw(cq);
528         if (!cqe)
529                 return -EAGAIN;
530
531         cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
532
533         ++cq->mcq.cons_index;
534
535         /* Make sure we read CQ entry contents after we've checked the
536          * ownership bit.
537          */
538         rmb();
539
540         opcode = cqe64->op_own >> 4;
541         if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
542                 if (likely(cq->resize_buf)) {
543                         free_cq_buf(dev, &cq->buf);
544                         cq->buf = *cq->resize_buf;
545                         kfree(cq->resize_buf);
546                         cq->resize_buf = NULL;
547                         goto repoll;
548                 } else {
549                         mlx5_ib_warn(dev, "unexpected resize cqe\n");
550                 }
551         }
552
553         qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
554         if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
555                 /* We do not have to take the QP table lock here,
556                  * because CQs will be locked while QPs are removed
557                  * from the table.
558                  */
559                 mqp = __mlx5_qp_lookup(dev->mdev, qpn);
560                 *cur_qp = to_mibqp(mqp);
561         }
562
563         wc->qp  = &(*cur_qp)->ibqp;
564         switch (opcode) {
565         case MLX5_CQE_REQ:
566                 wq = &(*cur_qp)->sq;
567                 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
568                 idx = wqe_ctr & (wq->wqe_cnt - 1);
569                 handle_good_req(wc, cqe64, wq, idx);
570                 handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
571                 wc->wr_id = wq->wrid[idx];
572                 wq->tail = wq->wqe_head[idx] + 1;
573                 wc->status = IB_WC_SUCCESS;
574                 break;
575         case MLX5_CQE_RESP_WR_IMM:
576         case MLX5_CQE_RESP_SEND:
577         case MLX5_CQE_RESP_SEND_IMM:
578         case MLX5_CQE_RESP_SEND_INV:
579                 handle_responder(wc, cqe64, *cur_qp);
580                 wc->status = IB_WC_SUCCESS;
581                 break;
582         case MLX5_CQE_RESIZE_CQ:
583                 break;
584         case MLX5_CQE_REQ_ERR:
585         case MLX5_CQE_RESP_ERR:
586                 err_cqe = (struct mlx5_err_cqe *)cqe64;
587                 mlx5_handle_error_cqe(dev, err_cqe, wc);
588                 mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
589                             opcode == MLX5_CQE_REQ_ERR ?
590                             "Requestor" : "Responder", cq->mcq.cqn);
591                 mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
592                             err_cqe->syndrome, err_cqe->vendor_err_synd);
593                 if (opcode == MLX5_CQE_REQ_ERR) {
594                         wq = &(*cur_qp)->sq;
595                         wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
596                         idx = wqe_ctr & (wq->wqe_cnt - 1);
597                         wc->wr_id = wq->wrid[idx];
598                         wq->tail = wq->wqe_head[idx] + 1;
599                 } else {
600                         struct mlx5_ib_srq *srq;
601
602                         if ((*cur_qp)->ibqp.srq) {
603                                 srq = to_msrq((*cur_qp)->ibqp.srq);
604                                 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
605                                 wc->wr_id = srq->wrid[wqe_ctr];
606                                 mlx5_ib_free_srq_wqe(srq, wqe_ctr);
607                         } else {
608                                 wq = &(*cur_qp)->rq;
609                                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
610                                 ++wq->tail;
611                         }
612                 }
613                 break;
614         case MLX5_CQE_SIG_ERR:
615                 sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
616
617                 read_lock(&dev->mdev->priv.mkey_table.lock);
618                 mmkey = __mlx5_mr_lookup(dev->mdev,
619                                          mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
620                 mr = to_mibmr(mmkey);
621                 get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
622                 mr->sig->sig_err_exists = true;
623                 mr->sig->sigerr_count++;
624
625                 mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
626                              cq->mcq.cqn, mr->sig->err_item.key,
627                              mr->sig->err_item.err_type,
628                              mr->sig->err_item.sig_err_offset,
629                              mr->sig->err_item.expected,
630                              mr->sig->err_item.actual);
631
632                 read_unlock(&dev->mdev->priv.mkey_table.lock);
633                 goto repoll;
634         }
635
636         return 0;
637 }
638
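/*
 * Drain up to @num_entries software-generated completions queued by
 * mlx5_ib_generate_wc().  When called on the fatal-error path they are
 * reported as flush errors.
 */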
639 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
640                         struct ib_wc *wc, bool is_fatal_err)
641 {
642         struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
643         struct mlx5_ib_wc *soft_wc, *next;
644         int npolled = 0;
645
646         list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
647                 if (npolled >= num_entries)
648                         break;
649
650                 mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
651                             cq->mcq.cqn);
652
653                 if (unlikely(is_fatal_err)) {
654                         soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
655                         soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
656                 }
657                 wc[npolled++] = soft_wc->wc;
658                 list_del(&soft_wc->list);
659                 kfree(soft_wc);
660         }
661
662         return npolled;
663 }
664
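/*
 * Verbs poll entry point.  If the device is in internal error state,
 * all completions are generated in software as flush errors.
 * Otherwise soft completions are returned first, then hardware CQEs,
 * and the consumer index doorbell record is updated only if at least
 * one hardware CQE was polled.
 */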
665 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
666 {
667         struct mlx5_ib_cq *cq = to_mcq(ibcq);
668         struct mlx5_ib_qp *cur_qp = NULL;
669         struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
670         struct mlx5_core_dev *mdev = dev->mdev;
671         unsigned long flags;
672         int soft_polled = 0;
673         int npolled;
674
675         spin_lock_irqsave(&cq->lock, flags);
676         if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
677                 /* drain any pending software-generated completions first */
678                 if (unlikely(!list_empty(&cq->wc_list)))
679                         soft_polled = poll_soft_wc(cq, num_entries, wc, true);
680
681                 mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
682                                      wc + soft_polled, &npolled);
683                 goto out;
684         }
685
686         if (unlikely(!list_empty(&cq->wc_list)))
687                 soft_polled = poll_soft_wc(cq, num_entries, wc, false);
688
689         for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
690                 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
691                         break;
692         }
693
694         if (npolled)
695                 mlx5_cq_set_ci(&cq->mcq);
696 out:
697         spin_unlock_irqrestore(&cq->lock, flags);
698
699         return soft_polled + npolled;
700 }
701
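/*
 * Arm the CQ through the UAR doorbell page for the next (or next
 * solicited) completion.  Returns 1 when IB_CQ_REPORT_MISSED_EVENTS
 * was requested and software-generated completions are already
 * pending, so the caller knows to poll again.
 */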
702 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
703 {
704         struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
705         struct mlx5_ib_cq *cq = to_mcq(ibcq);
706         void __iomem *uar_page = mdev->priv.uar->map;
707         unsigned long irq_flags;
708         int ret = 0;
709
710         spin_lock_irqsave(&cq->lock, irq_flags);
711         if (cq->notify_flags != IB_CQ_NEXT_COMP)
712                 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
713
714         if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
715                 ret = 1;
716         spin_unlock_irqrestore(&cq->lock, irq_flags);
717
718         mlx5_cq_arm(&cq->mcq,
719                     (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
720                     MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
721                     uar_page, to_mcq(ibcq)->mcq.cons_index);
722
723         return ret;
724 }
725
726 static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
727                              struct mlx5_ib_cq_buf *buf,
728                              int nent,
729                              int cqe_size)
730 {
731         struct mlx5_frag_buf_ctrl *c = &buf->fbc;
732         struct mlx5_frag_buf *frag_buf = &c->frag_buf;
733         u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
734         int err;
735
736         MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
737         MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
738
739         mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
740
741         err = mlx5_frag_buf_alloc_node(dev->mdev,
742                                        nent * cqe_size,
743                                        frag_buf,
744                                        dev->mdev->priv.numa_node);
745         if (err)
746                 return err;
747
748         buf->cqe_size = cqe_size;
749         buf->nent = nent;
750
751         return 0;
752 }
753
754 enum {
755         MLX5_CQE_RES_FORMAT_HASH = 0,
756         MLX5_CQE_RES_FORMAT_CSUM = 1,
757         MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
758 };
759
760 static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
761 {
762         switch (format) {
763         case MLX5_IB_CQE_RES_FORMAT_HASH:
764                 return MLX5_CQE_RES_FORMAT_HASH;
765         case MLX5_IB_CQE_RES_FORMAT_CSUM:
766                 return MLX5_CQE_RES_FORMAT_CSUM;
767         case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
768                 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
769                         return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
770                 return -EOPNOTSUPP;
771         default:
772                 return -EINVAL;
773         }
774 }
775
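/*
 * Build a CQ backed by a userspace buffer: copy the create command
 * (accepting the older, shorter layout without the flags field), pin
 * the CQE buffer and doorbell record, fill the physical address array
 * in the firmware command, and optionally enable CQE compression and
 * 128-byte padded CQEs when the firmware reports support.
 */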
776 static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
777                           struct ib_ucontext *context, struct mlx5_ib_cq *cq,
778                           int entries, u32 **cqb,
779                           int *cqe_size, int *index, int *inlen)
780 {
781         struct mlx5_ib_create_cq ucmd = {};
782         size_t ucmdlen;
783         int page_shift;
784         __be64 *pas;
785         int npages;
786         int ncont;
787         void *cqc;
788         int err;
789
790         ucmdlen = udata->inlen < sizeof(ucmd) ?
791                   (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
792
793         if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
794                 return -EFAULT;
795
796         if (ucmdlen == sizeof(ucmd) &&
797             (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
798                 return -EINVAL;
799
800         if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
801                 return -EINVAL;
802
803         *cqe_size = ucmd.cqe_size;
804
805         cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
806                                    entries * ucmd.cqe_size,
807                                    IB_ACCESS_LOCAL_WRITE, 1);
808         if (IS_ERR(cq->buf.umem)) {
809                 err = PTR_ERR(cq->buf.umem);
810                 return err;
811         }
812
813         err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
814                                   &cq->db);
815         if (err)
816                 goto err_umem;
817
818         mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
819                            &ncont, NULL);
820         mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
821                     ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
822
823         *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
824                  MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
825         *cqb = kvzalloc(*inlen, GFP_KERNEL);
826         if (!*cqb) {
827                 err = -ENOMEM;
828                 goto err_db;
829         }
830
831         pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
832         mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
833
834         cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
835         MLX5_SET(cqc, cqc, log_page_size,
836                  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
837
838         *index = to_mucontext(context)->bfregi.sys_pages[0];
839
840         if (ucmd.cqe_comp_en == 1) {
841                 int mini_cqe_format;
842
843                 if (!((*cqe_size == 128 &&
844                        MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
845                       (*cqe_size == 64  &&
846                        MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
847                         err = -EOPNOTSUPP;
848                         mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
849                                      *cqe_size);
850                         goto err_cqb;
851                 }
852
853                 mini_cqe_format =
854                         mini_cqe_res_format_to_hw(dev,
855                                                   ucmd.cqe_comp_res_format);
856                 if (mini_cqe_format < 0) {
857                         err = mini_cqe_format;
858                         mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
859                                     ucmd.cqe_comp_res_format, err);
860                         goto err_cqb;
861                 }
862
863                 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
864                 MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
865         }
866
867         if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
868                 if (*cqe_size != 128 ||
869                     !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
870                         err = -EOPNOTSUPP;
871                         mlx5_ib_warn(dev,
872                                      "CQE padding is not supported for CQE size of %dB!\n",
873                                      *cqe_size);
874                         goto err_cqb;
875                 }
876
877                 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
878         }
879
880         return 0;
881
882 err_cqb:
883         kvfree(*cqb);
884
885 err_db:
886         mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
887
888 err_umem:
889         ib_umem_release(cq->buf.umem);
890         return err;
891 }
892
893 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
894 {
895         mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
896         ib_umem_release(cq->buf.umem);
897 }
898
899 static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
900 {
901         int i;
902         void *cqe;
903         struct mlx5_cqe64 *cqe64;
904
905         for (i = 0; i < buf->nent; i++) {
906                 cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
907                 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
908                 cqe64->op_own = MLX5_CQE_INVALID << 4;
909         }
910 }
911
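/*
 * Build a CQ for kernel consumers: allocate the doorbell record and a
 * fragmented CQE buffer, mark every CQE invalid so it is not treated
 * as software-owned, and fill the physical address array for the
 * firmware command.
 */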
912 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
913                             int entries, int cqe_size,
914                             u32 **cqb, int *index, int *inlen)
915 {
916         __be64 *pas;
917         void *cqc;
918         int err;
919
920         err = mlx5_db_alloc(dev->mdev, &cq->db);
921         if (err)
922                 return err;
923
924         cq->mcq.set_ci_db  = cq->db.db;
925         cq->mcq.arm_db     = cq->db.db + 1;
926         cq->mcq.cqe_sz = cqe_size;
927
928         err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
929         if (err)
930                 goto err_db;
931
932         init_cq_frag_buf(&cq->buf);
933
934         *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
935                  MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
936                  cq->buf.fbc.frag_buf.npages;
937         *cqb = kvzalloc(*inlen, GFP_KERNEL);
938         if (!*cqb) {
939                 err = -ENOMEM;
940                 goto err_buf;
941         }
942
943         pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
944         mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
945
946         cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
947         MLX5_SET(cqc, cqc, log_page_size,
948                  cq->buf.fbc.frag_buf.page_shift -
949                  MLX5_ADAPTER_PAGE_SHIFT);
950
951         *index = dev->mdev->priv.uar->index;
952
953         return 0;
954
955 err_buf:
956         free_cq_buf(dev, &cq->buf);
957
958 err_db:
959         mlx5_db_free(dev->mdev, &cq->db);
960         return err;
961 }
962
963 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
964 {
965         free_cq_buf(dev, &cq->buf);
966         mlx5_db_free(dev->mdev, &cq->db);
967 }
968
969 static void notify_soft_wc_handler(struct work_struct *work)
970 {
971         struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
972                                              notify_work);
973
974         cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
975 }
976
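/*
 * Verbs create entry point.  The requested depth is rounded up to a
 * power of two (the usable depth, entries - 1, is reported back in
 * ibcq->cqe), the user or kernel buffer is set up, the CQ context is
 * filled and the CQ is created in firmware.  For user CQs the CQN is
 * copied back to the caller.
 */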
977 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
978                                 const struct ib_cq_init_attr *attr,
979                                 struct ib_ucontext *context,
980                                 struct ib_udata *udata)
981 {
982         int entries = attr->cqe;
983         int vector = attr->comp_vector;
984         struct mlx5_ib_dev *dev = to_mdev(ibdev);
985         struct mlx5_ib_cq *cq;
986         int uninitialized_var(index);
987         int uninitialized_var(inlen);
988         u32 *cqb = NULL;
989         void *cqc;
990         int cqe_size;
991         unsigned int irqn;
992         int eqn;
993         int err;
994
995         if (entries < 0 ||
996             (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
997                 return ERR_PTR(-EINVAL);
998
999         if (check_cq_create_flags(attr->flags))
1000                 return ERR_PTR(-EOPNOTSUPP);
1001
1002         entries = roundup_pow_of_two(entries + 1);
1003         if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
1004                 return ERR_PTR(-EINVAL);
1005
1006         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1007         if (!cq)
1008                 return ERR_PTR(-ENOMEM);
1009
1010         cq->ibcq.cqe = entries - 1;
1011         mutex_init(&cq->resize_mutex);
1012         spin_lock_init(&cq->lock);
1013         cq->resize_buf = NULL;
1014         cq->resize_umem = NULL;
1015         cq->create_flags = attr->flags;
1016         INIT_LIST_HEAD(&cq->list_send_qp);
1017         INIT_LIST_HEAD(&cq->list_recv_qp);
1018
1019         if (context) {
1020                 err = create_cq_user(dev, udata, context, cq, entries,
1021                                      &cqb, &cqe_size, &index, &inlen);
1022                 if (err)
1023                         goto err_create;
1024         } else {
1025                 cqe_size = cache_line_size() == 128 ? 128 : 64;
1026                 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
1027                                        &index, &inlen);
1028                 if (err)
1029                         goto err_create;
1030
1031                 INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
1032         }
1033
1034         err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
1035         if (err)
1036                 goto err_cqb;
1037
1038         cq->cqe_size = cqe_size;
1039
1040         cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
1041         MLX5_SET(cqc, cqc, cqe_sz,
1042                  cqe_sz_to_mlx_sz(cqe_size,
1043                                   cq->private_flags &
1044                                   MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
1045         MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
1046         MLX5_SET(cqc, cqc, uar_page, index);
1047         MLX5_SET(cqc, cqc, c_eqn, eqn);
1048         MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
1049         if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
1050                 MLX5_SET(cqc, cqc, oi, 1);
1051
1052         err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
1053         if (err)
1054                 goto err_cqb;
1055
1056         mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
1057         cq->mcq.irqn = irqn;
1058         if (context)
1059                 cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
1060         else
1061                 cq->mcq.comp  = mlx5_ib_cq_comp;
1062         cq->mcq.event = mlx5_ib_cq_event;
1063
1064         INIT_LIST_HEAD(&cq->wc_list);
1065
1066         if (context)
1067                 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
1068                         err = -EFAULT;
1069                         goto err_cmd;
1070                 }
1071
1072
1073         kvfree(cqb);
1074         return &cq->ibcq;
1075
1076 err_cmd:
1077         mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
1078
1079 err_cqb:
1080         kvfree(cqb);
1081         if (context)
1082                 destroy_cq_user(cq, context);
1083         else
1084                 destroy_cq_kernel(dev, cq);
1085
1086 err_create:
1087         kfree(cq);
1088
1089         return ERR_PTR(err);
1090 }
1091
1092
1093 int mlx5_ib_destroy_cq(struct ib_cq *cq)
1094 {
1095         struct mlx5_ib_dev *dev = to_mdev(cq->device);
1096         struct mlx5_ib_cq *mcq = to_mcq(cq);
1097         struct ib_ucontext *context = NULL;
1098
1099         if (cq->uobject)
1100                 context = cq->uobject->context;
1101
1102         mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
1103         if (context)
1104                 destroy_cq_user(mcq, context);
1105         else
1106                 destroy_cq_kernel(dev, mcq);
1107
1108         kfree(mcq);
1109
1110         return 0;
1111 }
1112
1113 static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
1114 {
1115         return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
1116 }
1117
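/*
 * Remove all CQEs addressed to the QP or SRQ identified by @rsn,
 * compacting the remaining entries over them.  Callers are expected to
 * own the CQ lock; mlx5_ib_cq_clean() below is the locking wrapper.
 */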
1118 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
1119 {
1120         struct mlx5_cqe64 *cqe64, *dest64;
1121         void *cqe, *dest;
1122         u32 prod_index;
1123         int nfreed = 0;
1124         u8 owner_bit;
1125
1126         if (!cq)
1127                 return;
1128
1129         /* First we need to find the current producer index, so we
1130          * know where to start cleaning from.  It doesn't matter if HW
1131          * adds new entries after this loop -- the QP we're worried
1132          * about is already in RESET, so the new entries won't come
1133          * from our QP and therefore don't need to be checked.
1134          */
1135         for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1136                 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1137                         break;
1138
1139         /* Now sweep backwards through the CQ, removing CQ entries
1140          * that match our QP by copying older entries on top of them.
1141          */
1142         while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1143                 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1144                 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1145                 if (is_equal_rsn(cqe64, rsn)) {
1146                         if (srq && (ntohl(cqe64->srqn) & 0xffffff))
1147                                 mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
1148                         ++nfreed;
1149                 } else if (nfreed) {
1150                         dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1151                         dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1152                         owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
1153                         memcpy(dest, cqe, cq->mcq.cqe_sz);
1154                         dest64->op_own = owner_bit |
1155                                 (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
1156                 }
1157         }
1158
1159         if (nfreed) {
1160                 cq->mcq.cons_index += nfreed;
1161                 /* Make sure update of buffer contents is done before
1162                  * updating consumer index.
1163                  */
1164                 wmb();
1165                 mlx5_cq_set_ci(&cq->mcq);
1166         }
1167 }
1168
1169 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
1170 {
1171         if (!cq)
1172                 return;
1173
1174         spin_lock_irq(&cq->lock);
1175         __mlx5_ib_cq_clean(cq, qpn, srq);
1176         spin_unlock_irq(&cq->lock);
1177 }
1178
1179 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1180 {
1181         struct mlx5_ib_dev *dev = to_mdev(cq->device);
1182         struct mlx5_ib_cq *mcq = to_mcq(cq);
1183         int err;
1184
1185         if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
1186                 return -EOPNOTSUPP;
1187
1188         if (cq_period > MLX5_MAX_CQ_PERIOD)
1189                 return -EINVAL;
1190
1191         err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
1192                                              cq_period, cq_count);
1193         if (err)
1194                 mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
1195
1196         return err;
1197 }
1198
1199 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1200                        int entries, struct ib_udata *udata, int *npas,
1201                        int *page_shift, int *cqe_size)
1202 {
1203         struct mlx5_ib_resize_cq ucmd;
1204         struct ib_umem *umem;
1205         int err;
1206         int npages;
1207         struct ib_ucontext *context = cq->buf.umem->context;
1208
1209         err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
1210         if (err)
1211                 return err;
1212
1213         if (ucmd.reserved0 || ucmd.reserved1)
1214                 return -EINVAL;
1215
1216         /* check multiplication overflow */
1217         if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
1218                 return -EINVAL;
1219
1220         umem = ib_umem_get(context, ucmd.buf_addr,
1221                            (size_t)ucmd.cqe_size * entries,
1222                            IB_ACCESS_LOCAL_WRITE, 1);
1223         if (IS_ERR(umem)) {
1224                 err = PTR_ERR(umem);
1225                 return err;
1226         }
1227
1228         mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
1229                            npas, NULL);
1230
1231         cq->resize_umem = umem;
1232         *cqe_size = ucmd.cqe_size;
1233
1234         return 0;
1235 }
1236
1237 static void un_resize_user(struct mlx5_ib_cq *cq)
1238 {
1239         ib_umem_release(cq->resize_umem);
1240 }
1241
1242 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1243                          int entries, int cqe_size)
1244 {
1245         int err;
1246
1247         cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
1248         if (!cq->resize_buf)
1249                 return -ENOMEM;
1250
1251         err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
1252         if (err)
1253                 goto ex;
1254
1255         init_cq_frag_buf(cq->resize_buf);
1256
1257         return 0;
1258
1259 ex:
1260         kfree(cq->resize_buf);
1261         return err;
1262 }
1263
1264 static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
1265 {
1266         free_cq_buf(dev, cq->resize_buf);
1267         cq->resize_buf = NULL;
1268 }
1269
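/*
 * Copy the CQEs that have not been polled yet from the old buffer into
 * the resize buffer, fixing up the software ownership bit on each
 * copy, until reaching the special MLX5_CQE_RESIZE_CQ entry that marks
 * the end of the old buffer.
 */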
1270 static int copy_resize_cqes(struct mlx5_ib_cq *cq)
1271 {
1272         struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
1273         struct mlx5_cqe64 *scqe64;
1274         struct mlx5_cqe64 *dcqe64;
1275         void *start_cqe;
1276         void *scqe;
1277         void *dcqe;
1278         int ssize;
1279         int dsize;
1280         int i;
1281         u8 sw_own;
1282
1283         ssize = cq->buf.cqe_size;
1284         dsize = cq->resize_buf->cqe_size;
1285         if (ssize != dsize) {
1286                 mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
1287                 return -EINVAL;
1288         }
1289
1290         i = cq->mcq.cons_index;
1291         scqe = get_sw_cqe(cq, i);
1292         scqe64 = ssize == 64 ? scqe : scqe + 64;
1293         start_cqe = scqe;
1294         if (!scqe) {
1295                 mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1296                 return -EINVAL;
1297         }
1298
1299         while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
1300                 dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
1301                                              (i + 1) & cq->resize_buf->nent);
1302                 dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
1303                 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
1304                 memcpy(dcqe, scqe, dsize);
1305                 dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
1306
1307                 ++i;
1308                 scqe = get_sw_cqe(cq, i);
1309                 scqe64 = ssize == 64 ? scqe : scqe + 64;
1310                 if (!scqe) {
1311                         mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1312                         return -EINVAL;
1313                 }
1314
1315                 if (scqe == start_cqe) {
1316                         pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
1317                                 cq->mcq.cqn);
1318                         return -ENOMEM;
1319                 }
1320         }
1321         ++cq->mcq.cons_index;
1322         return 0;
1323 }
1324
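/*
 * Verbs resize entry point: validate the new depth, allocate a new
 * user or kernel buffer, issue MLX5_CQ_OPMOD_RESIZE to firmware, and
 * for kernel CQs copy the outstanding CQEs into the new buffer before
 * freeing the old one.
 */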
1325 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
1326 {
1327         struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
1328         struct mlx5_ib_cq *cq = to_mcq(ibcq);
1329         void *cqc;
1330         u32 *in;
1331         int err;
1332         int npas;
1333         __be64 *pas;
1334         int page_shift;
1335         int inlen;
1336         int uninitialized_var(cqe_size);
1337         unsigned long flags;
1338
1339         if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
1340                 pr_info("Firmware does not support resize CQ\n");
1341                 return -ENOSYS;
1342         }
1343
1344         if (entries < 1 ||
1345             entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
1346                 mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
1347                              entries,
1348                              1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
1349                 return -EINVAL;
1350         }
1351
1352         entries = roundup_pow_of_two(entries + 1);
1353         if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
1354                 return -EINVAL;
1355
1356         if (entries == ibcq->cqe + 1)
1357                 return 0;
1358
1359         mutex_lock(&cq->resize_mutex);
1360         if (udata) {
1361                 err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
1362                                   &cqe_size);
1363         } else {
1364                 cqe_size = 64;
1365                 err = resize_kernel(dev, cq, entries, cqe_size);
1366                 if (!err) {
1367                         struct mlx5_frag_buf_ctrl *c;
1368
1369                         c = &cq->resize_buf->fbc;
1370                         npas = c->frag_buf.npages;
1371                         page_shift = c->frag_buf.page_shift;
1372                 }
1373         }
1374
1375         if (err)
1376                 goto ex;
1377
1378         inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
1379                 MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
1380
1381         in = kvzalloc(inlen, GFP_KERNEL);
1382         if (!in) {
1383                 err = -ENOMEM;
1384                 goto ex_resize;
1385         }
1386
1387         pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
1388         if (udata)
1389                 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
1390                                      pas, 0);
1391         else
1392                 mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
1393                                           pas);
1394
1395         MLX5_SET(modify_cq_in, in,
1396                  modify_field_select_resize_field_select.resize_field_select.resize_field_select,
1397                  MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
1398                  MLX5_MODIFY_CQ_MASK_PG_OFFSET |
1399                  MLX5_MODIFY_CQ_MASK_PG_SIZE);
1400
1401         cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
1402
1403         MLX5_SET(cqc, cqc, log_page_size,
1404                  page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1405         MLX5_SET(cqc, cqc, cqe_sz,
1406                  cqe_sz_to_mlx_sz(cqe_size,
1407                                   cq->private_flags &
1408                                   MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
1409         MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
1410
1411         MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
1412         MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1413
1414         err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
1415         if (err)
1416                 goto ex_alloc;
1417
1418         if (udata) {
1419                 cq->ibcq.cqe = entries - 1;
1420                 ib_umem_release(cq->buf.umem);
1421                 cq->buf.umem = cq->resize_umem;
1422                 cq->resize_umem = NULL;
1423         } else {
1424                 struct mlx5_ib_cq_buf tbuf;
1425                 int resized = 0;
1426
1427                 spin_lock_irqsave(&cq->lock, flags);
1428                 if (cq->resize_buf) {
1429                         err = copy_resize_cqes(cq);
1430                         if (!err) {
1431                                 tbuf = cq->buf;
1432                                 cq->buf = *cq->resize_buf;
1433                                 kfree(cq->resize_buf);
1434                                 cq->resize_buf = NULL;
1435                                 resized = 1;
1436                         }
1437                 }
1438                 cq->ibcq.cqe = entries - 1;
1439                 spin_unlock_irqrestore(&cq->lock, flags);
1440                 if (resized)
1441                         free_cq_buf(dev, &tbuf);
1442         }
1443         mutex_unlock(&cq->resize_mutex);
1444
1445         kvfree(in);
1446         return 0;
1447
1448 ex_alloc:
1449         kvfree(in);
1450
1451 ex_resize:
1452         if (udata)
1453                 un_resize_user(cq);
1454         else
1455                 un_resize_kernel(dev, cq);
1456 ex:
1457         mutex_unlock(&cq->resize_mutex);
1458         return err;
1459 }
1460
1461 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
1462 {
1463         struct mlx5_ib_cq *cq;
1464
1465         if (!ibcq)
1466                 return 128;
1467
1468         cq = to_mcq(ibcq);
1469         return cq->cqe_size;
1470 }
1471
1472 /* Called from atomic context */
1473 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
1474 {
1475         struct mlx5_ib_wc *soft_wc;
1476         struct mlx5_ib_cq *cq = to_mcq(ibcq);
1477         unsigned long flags;
1478
1479         soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
1480         if (!soft_wc)
1481                 return -ENOMEM;
1482
1483         soft_wc->wc = *wc;
1484         spin_lock_irqsave(&cq->lock, flags);
1485         list_add_tail(&soft_wc->list, &cq->wc_list);
1486         if (cq->notify_flags == IB_CQ_NEXT_COMP ||
1487             wc->status != IB_WC_SUCCESS) {
1488                 cq->notify_flags = 0;
1489                 schedule_work(&cq->notify_work);
1490         }
1491         spin_unlock_irqrestore(&cq->lock, flags);
1492
1493         return 0;
1494 }