/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*       copyright notice, this list of conditions and the following
*       disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*       copyright notice, this list of conditions and the following
*       disclaimer in the documentation and/or other materials
*       provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"

/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 */
static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
        wmb();            /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
}
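
/*
 * Note: the header quadword at byte offset 24 carries the WQEVALID
 * (polarity) bit, so it is always written last; the wmb() above ensures
 * hardware never observes a valid header for a WQE whose other words are
 * still being written.
 */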

/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: cqp tail register value
 * @tail: wqtail register value
 * @error: cqp processing err
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
                                          u32 *val,
                                          u32 *tail,
                                          u32 *error)
{
        if (cqp->dev->is_pf) {
                *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
                *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
        } else {
                *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
                *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
        }
}

/**
 * i40iw_cqp_poll_registers - poll cqp registers
 * @cqp: struct for cqp hw
 * @tail: wqtail register value
 * @count: how many times to try for completion
 */
static enum i40iw_status_code i40iw_cqp_poll_registers(
                                                struct i40iw_sc_cqp *cqp,
                                                u32 tail,
                                                u32 count)
{
        u32 i = 0;
        u32 newtail, error, val;

        while (i < count) {
                i++;
                i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
                if (error) {
                        error = (cqp->dev->is_pf) ?
                                 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
                                 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        return I40IW_ERR_CQP_COMPL_ERROR;
                }
                if (newtail != tail) {
                        /* SUCCESS */
                        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
                        return 0;
                }
                udelay(I40IW_SLEEP_COUNT);
        }
        return I40IW_ERR_TIMEOUT;
}

/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 * @sd: number of SDs for HMC objects
 *
 * parses fpm commit info and copies the base values
 * of the hmc objects into info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
                                u64 *buf,
                                struct i40iw_hmc_obj_info *info,
                                u32 *sd)
{
        u64 temp;
        u64 size;
        u64 base = 0;
        u32 i, j;
        u32 k = 0;
        u32 low;

        /* copy base values in obj_info */
        for (i = I40IW_HMC_IW_QP, j = 0;
                        i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                info[i].base = RS_64_1(temp, 32) * 512;
                if (info[i].base > base) {
                        base = info[i].base;
                        k = i;
                }
                low = (u32)(temp);
                if (low)
                        info[i].cnt = low;
        }
        size = info[k].cnt * info[k].size + info[k].base;
        if (size & 0x1FFFFF)
                *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
        else
                *sd = (u32)(size >> 21);

        return 0;
}
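
/*
 * Illustrative example (derived from the arithmetic above): each SD covers
 * 2MB (1 << 21 bytes), so if the highest-ending object works out to
 * cnt * size + base = 5MB, then size >> 21 = 2 with a nonzero remainder,
 * giving *sd = 3 segment descriptors.
 */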

/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copies the max_cnt and
 * size values of the hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
                                u64 *buf,
                                struct i40iw_hmc_info *hmc_info,
                                struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
        u64 temp;
        struct i40iw_hmc_obj_info *obj_info;
        u32 i, j, size;
        u16 max_pe_sds;

        obj_info = hmc_info->hmc_obj;

        get_64bit_val(buf, 0, &temp);
        hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
        max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

        /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
        if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
                max_pe_sds--;
        hmc_fpm_misc->max_sds = max_pe_sds;
        hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

        for (i = I40IW_HMC_IW_QP, j = 8;
             i <= I40IW_HMC_IW_ARP; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                if (i == I40IW_HMC_IW_QP)
                        obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
                else if (i == I40IW_HMC_IW_CQ)
                        obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
                else
                        obj_info[i].max_cnt = (u32)temp;

                size = (u32)RS_64_1(temp, 32);
                obj_info[i].size = ((u64)1 << size);
        }
        for (i = I40IW_HMC_IW_MR, j = 48;
                        i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                obj_info[i].max_cnt = (u32)temp;
                size = (u32)RS_64_1(temp, 32);
                obj_info[i].size = LS_64_1(1, size);
        }

        /* all three misc fields live in the same quadword at offset 120 */
        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
        hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
        hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
        get_64bit_val(buf, 64, &temp);
        hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
        if (!hmc_fpm_misc->xf_block_size)
                return I40IW_ERR_INVALID_SIZE;
        get_64bit_val(buf, 80, &temp);
        hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
        if (!hmc_fpm_misc->q1_block_size)
                return I40IW_ERR_INVALID_SIZE;
        return 0;
}

/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 */
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
                             struct i40iw_sc_pd *pd,
                             u16 pd_id)
{
        pd->size = sizeof(*pd);
        pd->pd_id = pd_id;
        pd->dev = dev;
}

/**
 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq, srq) to encode
 * @cqpsq: set for the cqp sq, whose encoding starts 1 above the other wq's
 */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
        u8 encoded_size = 0;

        /* cqp sq's hw coded value starts from 1 for size of 4
         * while it starts from 0 for qp's wq's.
         */
        if (cqpsq)
                encoded_size = 1;
        wqsize >>= 2;
        while (wqsize >>= 1)
                encoded_size++;
        return encoded_size;
}
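
/*
 * Worked example (derived from the loop above, not from the datasheet):
 * wqsize 4 encodes to 0 for a qp wq and 1 for the cqp sq; each doubling
 * adds 1, so wqsize 2048 encodes to 9 for a qp wq and 10 for the cqp sq.
 */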

/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
                                                struct i40iw_cqp_init_info *info)
{
        u8 hw_sq_size;

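        /*
         * sq_size must be a power of two between I40IW_CQP_SW_SQSIZE_4 and
         * I40IW_CQP_SW_SQSIZE_2048; (x & (x - 1)) is nonzero exactly when x
         * is not a power of two, which is what the third test rejects.
         */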
        if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
            (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
            ((info->sq_size & (info->sq_size - 1))))
                return I40IW_ERR_INVALID_SIZE;

        hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
        cqp->size = sizeof(*cqp);
        cqp->sq_size = info->sq_size;
        cqp->hw_sq_size = hw_sq_size;
        cqp->sq_base = info->sq;
        cqp->host_ctx = info->host_ctx;
        cqp->sq_pa = info->sq_pa;
        cqp->host_ctx_pa = info->host_ctx_pa;
        cqp->dev = info->dev;
        cqp->struct_ver = info->struct_ver;
        cqp->scratch_array = info->scratch_array;
        cqp->polarity = 0;
        cqp->en_datacenter_tcp = info->en_datacenter_tcp;
        cqp->enabled_vf_count = info->enabled_vf_count;
        cqp->hmc_profile = info->hmc_profile;
        info->dev->cqp = cqp;

        I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
                    __func__, cqp->sq_size, cqp->hw_sq_size,
                    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
        return 0;
}

/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @disable_pfpdus: true if pfpdus are to be disabled
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
                                                  bool disable_pfpdus,
                                                  u16 *maj_err,
                                                  u16 *min_err)
{
        u64 temp;
        u32 cnt = 0, p1, p2, val = 0, err_code;
        enum i40iw_status_code ret_code;

        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
                                          128,
                                          I40IW_SD_BUF_ALIGNMENT);

        if (ret_code)
                goto exit;

        temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
               LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

        if (disable_pfpdus)
                temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);

        set_64bit_val(cqp->host_ctx, 0, temp);
        set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
        temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
               LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
        set_64bit_val(cqp->host_ctx, 16, temp);
        set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
        set_64bit_val(cqp->host_ctx, 32, 0);
        set_64bit_val(cqp->host_ctx, 40, 0);
        set_64bit_val(cqp->host_ctx, 48, 0);
        set_64bit_val(cqp->host_ctx, 56, 0);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
                        cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

        p1 = RS_32_1(cqp->host_ctx_pa, 32);
        p2 = (u32)cqp->host_ctx_pa;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
                        ret_code = I40IW_ERR_TIMEOUT;
                        /*
                         * read the CQPERRCODES register to get the minor
                         * and major error code
                         */
                        if (cqp->dev->is_pf)
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
                        else
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
                        *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
                        goto exit;
                }
                udelay(I40IW_SLEEP_COUNT);
                if (cqp->dev->is_pf)
                        val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
                else
                        val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
        } while (!val);

exit:
        if (!ret_code)
                cqp->process_cqp_sds = i40iw_update_sds_noccq;
        return ret_code;
}

/**
 * i40iw_sc_cqp_post_sq - post to cqp's sq (ring the doorbell)
 * @cqp: struct for cqp hw
 */
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
{
        if (cqp->dev->is_pf)
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
        else
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));

        i40iw_debug(cqp->dev,
                    I40IW_DEBUG_WQE,
                    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
                    __func__,
                    cqp->sq_ring.head,
                    cqp->sq_ring.tail,
                    cqp->sq_ring.size);
}

/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data for the wqe, returned with its completion
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
        u64 *wqe = NULL;
        u32     wqe_idx;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
                i40iw_debug(cqp->dev,
                            I40IW_DEBUG_WQE,
                            "%s: ring is full head %x tail %x size %x\n",
                            __func__,
                            cqp->sq_ring.head,
                            cqp->sq_ring.tail,
                            cqp->sq_ring.size);
                return NULL;
        }
        I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
        if (ret_code)
                return NULL;
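        /*
         * wqe_idx wrapped back to 0: the sq has gone around once more, so
         * flip the producer polarity bit that gets written into each
         * header via I40IW_CQPSQ_WQEVALID.
         */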
        if (!wqe_idx)
                cqp->polarity = !cqp->polarity;

        wqe = cqp->sq_base[wqe_idx].elem;
        cqp->scratch_array[wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);

        return wqe;
}

/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
        u32 cnt = 0, val = 1;
        enum i40iw_status_code ret_code = 0;
        u32 cqpstat_addr;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
                cqpstat_addr = I40E_PFPE_CCQPSTATUS;
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
                cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        ret_code = I40IW_ERR_TIMEOUT;
                        break;
                }
                udelay(I40IW_SLEEP_COUNT);
                val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
        } while (val);

        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
        return ret_code;
}

/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_seq_num;

        /* write to cq doorbell shadow area */
        /* arm next se should always be zero */
        get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

        wmb();       /* make sure shadow area is updated before arming */

        if (ccq->dev->is_pf)
                i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
        else
                i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}
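
/*
 * Note on the doorbell shadow fields above: the arm sequence number is
 * bumped on every arm so successive arm requests stay distinguishable,
 * and ARM_NEXT asks for an event on the next CQE; ARM_NEXT_SE is carried
 * through unchanged (it should always be zero for the ccq, per the
 * comment in the function).
 */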

/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
                                        struct i40iw_sc_cq *ccq,
                                        struct i40iw_ccq_cqe_info *info)
{
        u64 qp_ctx, temp, temp1;
        u64 *cqe;
        struct i40iw_sc_cqp *cqp;
        u32 wqe_idx;
        u8 polarity;
        enum i40iw_status_code ret_code = 0;

        if (ccq->cq_uk.avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

        get_64bit_val(cqe, 24, &temp);
        polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
        if (polarity != ccq->cq_uk.polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        get_64bit_val(cqe, 8, &qp_ctx);
        cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
        info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
        info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        if (info->error) {
                info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
                info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        }
        wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
        info->scratch = cqp->scratch_array[wqe_idx];

        get_64bit_val(cqe, 16, &temp1);
        info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
        get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
        info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
        info->cqp = cqp;

        /* move the head for cq */
        I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
        if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
                ccq->cq_uk.polarity ^= 1;

        /* update cq tail in cq shadow memory also */
        I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
        set_64bit_val(ccq->cq_uk.shadow_area,
                      0,
                      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
        wmb(); /* write shadow area before tail */
        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
        return ret_code;
}

/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
                                        struct i40iw_sc_cqp *cqp,
                                        u8 op_code,
                                        struct i40iw_ccq_cqe_info *compl_info)
{
        struct i40iw_ccq_cqe_info info;
        struct i40iw_sc_cq *ccq;
        enum i40iw_status_code ret_code = 0;
        u32 cnt = 0;

        memset(&info, 0, sizeof(info));
        ccq = cqp->dev->ccq;
        while (1) {
                if (cnt++ > I40IW_DONE_COUNT)
                        return I40IW_ERR_TIMEOUT;

                if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
                        udelay(I40IW_SLEEP_COUNT);
                        continue;
                }

                if (info.error) {
                        ret_code = I40IW_ERR_CQP_COMPL_ERROR;
                        break;
                }
                /* warn on an opcode mismatch and keep polling for ours */
                if (op_code != info.op_code) {
                        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                                    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
                                    __func__, op_code, info.op_code);
                }
                /* success, exit out of the loop */
                if (op_code == info.op_code)
                        break;
        }

        if (compl_info)
                memcpy(compl_info, &info, sizeof(*compl_info));

        return ret_code;
}

/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_cqp_manage_push_page_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
                return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->qs_handle);

        header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table - manage hmc pm function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: true to free the hmc pm function
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 vf_index,
                                bool free_pm_fcn,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (vf_index >= I40IW_MAX_VF_PER_PF)
                return I40IW_ERR_INVALID_VF_ID;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_profile_type: type of profile to set
 * @vf_num: vf number for profile
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_profile_type,
                                u8 vf_num, bool post_sq,
                                bool poll_registers)
{
        u64 *wqe;
        u64 header;
        u32 val, tail, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16,
                      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
                                LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));

        header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
                       LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (poll_registers)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
                else
                        ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
                                                                 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
                                                                 NULL);
        }

        return ret_code;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}

/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u8 hmc_fn_id,
                                        struct i40iw_dma_mem *commit_fpm_mem,
                                        bool post_sq,
                                        u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, commit_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);

                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_commit_fpm_values_done(cqp);
        }

        return ret_code;
}

/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_query_fpm_values - cqp wqe to query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u8 hmc_fn_id,
                                        struct i40iw_dma_mem *query_fpm_mem,
                                        bool post_sq,
                                        u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, query_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        /* read the tail from CQP_TAIL register */
        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);

        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_query_fpm_values_done(cqp);
        }

        return ret_code;
}

/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_add_arp_cache_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 8, info->reach_max);

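        /* pack the 6-byte MAC address big-endian into the low 48 bits:
         * mac_addr[0] (shifted up 40 bits) is the most significant byte
         */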
        temp = info->mac_addr[5] |
               LS_64_1(info->mac_addr[4], 8) |
               LS_64_1(info->mac_addr[3], 16) |
               LS_64_1(info->mac_addr[2], 24) |
               LS_64_1(info->mac_addr[1], 32) |
               LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 16, temp);

        header = info->arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
                 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u16 arp_index,
                                        bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_query_arp_cache_entry - cqp wqe to query an arp entry by arp index
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of the entry to query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u16 arp_index,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_apbvt_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->port);

        header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
                 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * a listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive connections) or
 * sent (active connections), this routine is called with entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad (source/destination ip and port) is passed in info.
 *
 * When the iwarp connection is established and its state moves to RTS, the quad hash entry in
 * the hardware will point to iwarp's qp number and requires no calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        struct i40iw_qhash_table_info *info,
                                        u64 scratch,
                                        bool post_sq)
{
        u64 *wqe;
        u64 qw1 = 0;
        u64 qw2 = 0;
        u64 temp;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        temp = info->mac_addr[5] |
                LS_64_1(info->mac_addr[4], 8) |
                LS_64_1(info->mac_addr[3], 16) |
                LS_64_1(info->mac_addr[2], 24) |
                LS_64_1(info->mac_addr[1], 32) |
                LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 0, temp);

        qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
              LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
        if (info->ipv4_valid) {
                set_64bit_val(wqe,
                              48,
                              LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
        } else {
                set_64bit_val(wqe,
                              56,
                              LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
                              LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

                set_64bit_val(wqe,
                              48,
                              LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
                              LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
        }
        qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
        if (info->vlan_valid)
                qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
        set_64bit_val(wqe, 16, qw2);
        if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
                qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
                if (!info->ipv4_valid) {
                        set_64bit_val(wqe,
                                      40,
                                      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
                                      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
                        set_64bit_val(wqe,
                                      32,
                                      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
                                      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
                } else {
                        set_64bit_val(wqe,
                                      32,
                                      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
                }
        }

        set_64bit_val(wqe, 8, qw1);
        temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
               LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
               LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
               LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
               LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
               LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

        i40iw_insert_wqe_hdr(wqe, temp);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
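
/*
 * A minimal usage sketch (field values are hypothetical, not taken from
 * this driver's call sites): a newly created listener would install a SYN
 * entry keyed only on its local address and port, roughly:
 *
 *      struct i40iw_qhash_table_info qinfo = {};
 *
 *      qinfo.manage = I40IW_QHASH_MANAGE_TYPE_ADD;
 *      qinfo.entry_type = I40IW_QHASH_TYPE_TCP_SYN;
 *      qinfo.ipv4_valid = true;
 *      qinfo.dest_ip[0] = local_ipv4_addr;
 *      qinfo.dest_port = listen_port;
 *      i40iw_sc_manage_qhash_table_entry(cqp, &qinfo, scratch, true);
 *
 * and connection setup would later add a matching
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED entry carrying the full quad.
 */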

/**
 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe to allocate a local mac ip address entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac address entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_local_mac_ipaddr_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        temp = info->mac_addr[5] |
                LS_64_1(info->mac_addr[4], 8) |
                LS_64_1(info->mac_addr[3], 16) |
                LS_64_1(info->mac_addr[2], 24) |
                LS_64_1(info->mac_addr[1], 32) |
                LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 32, temp);

        header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete a local mac address entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force the mac address entry delete
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 entry_idx,
                                u8 ignore_ref_count,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_cqp_nop - send a nop wqe
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
                                               u64 scratch,
                                               bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_ceq_init - initialize ceq
 * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
                                                struct i40iw_ceq_init_info *info)
{
        u32 pble_obj_cnt;

        if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
            (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
                return I40IW_ERR_INVALID_SIZE;

        if (info->ceq_id >= I40IW_MAX_CEQID)
                return I40IW_ERR_INVALID_CEQ_ID;

        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
                return I40IW_ERR_INVALID_PBLE_INDEX;

        ceq->size = sizeof(*ceq);
        ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
        ceq->ceq_id = info->ceq_id;
        ceq->dev = info->dev;
        ceq->elem_cnt = info->elem_cnt;
        ceq->ceq_elem_pa = info->ceqe_pa;
        ceq->virtual_map = info->virtual_map;

        ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
        ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
        ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);

        ceq->tph_en = info->tph_en;
        ceq->tph_val = info->tph_val;
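        /* consumer polarity starts at 1: hardware writes valid = 1 on its
         * first pass through the ring, and the expected value flips each
         * time the ring wraps (see i40iw_sc_process_ceq)
         */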
        ceq->polarity = 1;
        I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
        ceq->dev->ceq[info->ceq_id] = ceq;

        return 0;
}

/**
 * i40iw_sc_ceq_create - create ceq wqe
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
                                                  u64 scratch,
                                                  bool post_sq)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;

        cqp = ceq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 16, ceq->elem_cnt);
        set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
        set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
        set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));

        header = ceq->ceq_id |
                 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
                 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
                 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = ceq->dev->cqp;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
}

/**
 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = ceq->dev->cqp;
        cqp->process_cqp_sds = i40iw_update_sds_noccq;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
}

/**
 * i40iw_sc_cceq_create - create cceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
{
        enum i40iw_status_code ret_code;

        ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
        if (!ret_code)
                ret_code = i40iw_sc_cceq_create_done(ceq);
        return ret_code;
}

/**
 * i40iw_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
                                                   u64 scratch,
                                                   bool post_sq)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;

        cqp = ceq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 16, ceq->elem_cnt);
        set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
        header = ceq->ceq_id |
                 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
                 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
                 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
1416
1417 /**
1418  * i40iw_sc_process_ceq - process ceq
1419  * @dev: sc device struct
1420  * @ceq: ceq sc structure
1421  */
1422 static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
1423 {
1424         u64 temp;
1425         u64 *ceqe;
1426         struct i40iw_sc_cq *cq = NULL;
1427         u8 polarity;
1428
1429         ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
1430         get_64bit_val(ceqe, 0, &temp);
1431         polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
1432         if (polarity != ceq->polarity)
1433                 return cq;
1434
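	/*
	 * The CEQE carries the CQ context address right-shifted by one,
	 * with the valid bit in bit 63; shifting left by one drops the
	 * valid bit and recovers the struct i40iw_sc_cq pointer.
	 */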
1435         cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
1436
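	/* advancing past the end of the ring flips the expected polarity */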
1437         I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
1438         if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
1439                 ceq->polarity ^= 1;
1440
1441         if (dev->is_pf)
1442                 i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
1443         else
1444                 i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
1445
1446         return cq;
1447 }
1448
1449 /**
1450  * i40iw_sc_aeq_init - initialize aeq
1451  * @aeq: aeq structure ptr
1452  * @info: aeq initialization info
1453  */
1454 static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
1455                                                 struct i40iw_aeq_init_info *info)
1456 {
1457         u32 pble_obj_cnt;
1458
1459         if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
1460             (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
1461                 return I40IW_ERR_INVALID_SIZE;
1462         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1463
1464         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1465                 return I40IW_ERR_INVALID_PBLE_INDEX;
1466
1467         aeq->size = sizeof(*aeq);
1468         aeq->polarity = 1;
1469         aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
1470         aeq->dev = info->dev;
1471         aeq->elem_cnt = info->elem_cnt;
1472
1473         aeq->aeq_elem_pa = info->aeq_elem_pa;
1474         I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
1476
1477         aeq->virtual_map = info->virtual_map;
1478         aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
1479         aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
1480         aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
1481         info->dev->aeq = aeq;
1482         return 0;
1483 }
1484
1485 /**
1486  * i40iw_sc_aeq_create - create aeq
1487  * @aeq: aeq structure ptr
1488  * @scratch: u64 saved to be used during cqp completion
1489  * @post_sq: flag for cqp db to ring
1490  */
1491 static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
1492                                                   u64 scratch,
1493                                                   bool post_sq)
1494 {
1495         u64 *wqe;
1496         struct i40iw_sc_cqp *cqp;
1497         u64 header;
1498
1499         cqp = aeq->dev->cqp;
1500         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1501         if (!wqe)
1502                 return I40IW_ERR_RING_FULL;
1503         set_64bit_val(wqe, 16, aeq->elem_cnt);
1504         set_64bit_val(wqe, 32,
1505                       (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
1506         set_64bit_val(wqe, 48,
1507                       (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
1508
1509         header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
1510                  LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
1511                  LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
1512                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1513
1514         i40iw_insert_wqe_hdr(wqe, header);
1515         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
1516                         wqe, I40IW_CQP_WQE_SIZE * 8);
1517         if (post_sq)
1518                 i40iw_sc_cqp_post_sq(cqp);
1519         return 0;
1520 }
1521
1522 /**
1523  * i40iw_sc_aeq_destroy - destroy aeq during close
1524  * @aeq: aeq structure ptr
1525  * @scratch: u64 saved to be used during cqp completion
1526  * @post_sq: flag for cqp db to ring
1527  */
1528 static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
1529                                                    u64 scratch,
1530                                                    bool post_sq)
1531 {
1532         u64 *wqe;
1533         struct i40iw_sc_cqp *cqp;
1534         u64 header;
1535
1536         cqp = aeq->dev->cqp;
1537         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1538         if (!wqe)
1539                 return I40IW_ERR_RING_FULL;
1540         set_64bit_val(wqe, 16, aeq->elem_cnt);
1541         set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
1542         header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
1543                  LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
1544                  LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
1545                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1546         i40iw_insert_wqe_hdr(wqe, header);
1547
1548         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
1549                         wqe, I40IW_CQP_WQE_SIZE * 8);
1550         if (post_sq)
1551                 i40iw_sc_cqp_post_sq(cqp);
1552         return 0;
1553 }
1554
1555 /**
1556  * i40iw_sc_get_next_aeqe - get next aeq entry
1557  * @aeq: aeq structure ptr
1558  * @info: aeqe info to be returned
1559  */
1560 static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
1561                                                      struct i40iw_aeqe_info *info)
1562 {
1563         u64 temp, compl_ctx;
1564         u64 *aeqe;
1565         u16 wqe_idx;
1566         u8 ae_src;
1567         u8 polarity;
1568
1569         aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
1570         get_64bit_val(aeqe, 0, &compl_ctx);
1571         get_64bit_val(aeqe, 8, &temp);
1572         polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
1573
1574         if (aeq->polarity != polarity)
1575                 return I40IW_ERR_QUEUE_EMPTY;
1576
1577         i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
1578
1579         ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
1580         wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
1581         info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
1582         info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
1583         info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
1584         info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
1585         info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
1586         info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
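	/*
	 * compl_ctx is interpreted per AE source below: QP sources carry
	 * the QP context as-is, while CQ sources store the CQ context
	 * right-shifted by one, hence the LS_64_1() shift in the CQ cases.
	 */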
1587         switch (ae_src) {
1588         case I40IW_AE_SOURCE_RQ:
1589         case I40IW_AE_SOURCE_RQ_0011:
1590                 info->qp = true;
1591                 info->wqe_idx = wqe_idx;
1592                 info->compl_ctx = compl_ctx;
1593                 break;
1594         case I40IW_AE_SOURCE_CQ:
1595         case I40IW_AE_SOURCE_CQ_0110:
1596         case I40IW_AE_SOURCE_CQ_1010:
1597         case I40IW_AE_SOURCE_CQ_1110:
1598                 info->cq = true;
1599                 info->compl_ctx = LS_64_1(compl_ctx, 1);
1600                 break;
1601         case I40IW_AE_SOURCE_SQ:
1602         case I40IW_AE_SOURCE_SQ_0111:
1603                 info->qp = true;
1604                 info->sq = true;
1605                 info->wqe_idx = wqe_idx;
1606                 info->compl_ctx = compl_ctx;
1607                 break;
1608         case I40IW_AE_SOURCE_IN_RR_WR:
1609         case I40IW_AE_SOURCE_IN_RR_WR_1011:
1610                 info->qp = true;
1611                 info->compl_ctx = compl_ctx;
1612                 info->in_rdrsp_wr = true;
1613                 break;
1614         case I40IW_AE_SOURCE_OUT_RR:
1615         case I40IW_AE_SOURCE_OUT_RR_1111:
1616                 info->qp = true;
1617                 info->compl_ctx = compl_ctx;
1618                 info->out_rdrsp = true;
1619                 break;
1620         default:
1621                 break;
1622         }
1623         I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
1624         if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
1625                 aeq->polarity ^= 1;
1626         return 0;
1627 }
1628
1629 /**
1630  * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1631  * @dev: sc device struct
1632  * @count: allocate count
1633  */
1634 static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
1635                                                           u32 count)
1636 {
1637         if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
1638                 return I40IW_ERR_INVALID_SIZE;
1639
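	/* credit the hardware with 'count' consumed AEQ entries */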
1640         if (dev->is_pf)
1641                 i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
1642         else
1643                 i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
1644
1645         return 0;
1646 }
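
/*
 * A minimal consumption-loop sketch (hypothetical caller, not part of this
 * file) showing how the two helpers above pair up: drain the AEQ with
 * i40iw_sc_get_next_aeqe(), then return the drained entries to hardware.
 *
 *	struct i40iw_aeqe_info info;
 *	u32 drained = 0;
 *
 *	while (!i40iw_sc_get_next_aeqe(aeq, &info)) {
 *		drained++;
 *		// dispatch on info.ae_id / info.qp / info.cq here
 *	}
 *	if (drained)
 *		i40iw_sc_repost_aeq_entries(aeq->dev, drained);
 */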
1647
1648 /**
1649  * i40iw_sc_aeq_create_done - poll for aeq create to complete
1650  * @aeq: aeq structure ptr
1651  */
1652 static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
1653 {
1654         struct i40iw_sc_cqp *cqp;
1655
1656         cqp = aeq->dev->cqp;
1657         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
1658 }
1659
1660 /**
1661  * i40iw_sc_aeq_destroy_done - poll for aeq destroy to complete
1662  * @aeq: aeq structure ptr
1663  */
1664 static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
1665 {
1666         struct i40iw_sc_cqp *cqp;
1667
1668         cqp = aeq->dev->cqp;
1669         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
1670 }
1671
1672 /**
1673  * i40iw_sc_ccq_init - initialize control cq
1674  * @cq: sc's cq struct
1675  * @info: info for control cq initialization
1676  */
1677 static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
1678                                                 struct i40iw_ccq_init_info *info)
1679 {
1680         u32 pble_obj_cnt;
1681
1682         if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
1683                 return I40IW_ERR_INVALID_SIZE;
1684
1685         if (info->ceq_id > I40IW_MAX_CEQID)
1686                 return I40IW_ERR_INVALID_CEQ_ID;
1687
1688         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1689
1690         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1691                 return I40IW_ERR_INVALID_PBLE_INDEX;
1692
1693         cq->cq_pa = info->cq_pa;
1694         cq->cq_uk.cq_base = info->cq_base;
1695         cq->shadow_area_pa = info->shadow_area_pa;
1696         cq->cq_uk.shadow_area = info->shadow_area;
1697         cq->shadow_read_threshold = info->shadow_read_threshold;
1698         cq->dev = info->dev;
1699         cq->ceq_id = info->ceq_id;
1700         cq->cq_uk.cq_size = info->num_elem;
1701         cq->cq_type = I40IW_CQ_TYPE_CQP;
1702         cq->ceqe_mask = info->ceqe_mask;
1703         I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
1704
1705         cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
1706         cq->ceq_id_valid = info->ceq_id_valid;
1707         cq->tph_en = info->tph_en;
1708         cq->tph_val = info->tph_val;
1709         cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
1710
1711         cq->pbl_list = info->pbl_list;
1712         cq->virtual_map = info->virtual_map;
1713         cq->pbl_chunk_size = info->pbl_chunk_size;
1714         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
1715         cq->cq_uk.polarity = true;
1716
1717         /* following are only for iw cqs so initialize them to zero */
1718         cq->cq_uk.cqe_alloc_reg = NULL;
1719         info->dev->ccq = cq;
1720         return 0;
1721 }
1722
1723 /**
1724  * i40iw_sc_ccq_create_done - poll cqp for ccq create
1725  * @ccq: ccq sc struct
1726  */
1727 static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
1728 {
1729         struct i40iw_sc_cqp *cqp;
1730
1731         cqp = ccq->dev->cqp;
1732         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
1733 }
1734
1735 /**
1736  * i40iw_sc_ccq_create - create control cq
1737  * @ccq: ccq sc struct
1738  * @scratch: u64 saved to be used during cqp completion
1739  * @check_overflow: overflow flag for ccq
1740  * @post_sq: flag for cqp db to ring
1741  */
1742 static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
1743                                                   u64 scratch,
1744                                                   bool check_overflow,
1745                                                   bool post_sq)
1746 {
1747         u64 *wqe;
1748         struct i40iw_sc_cqp *cqp;
1749         u64 header;
1750         enum i40iw_status_code ret_code;
1751
1752         cqp = ccq->dev->cqp;
1753         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1754         if (!wqe)
1755                 return I40IW_ERR_RING_FULL;
1756         set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
1757         set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
1758         set_64bit_val(wqe, 16,
1759                       LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
1760         set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
1761         set_64bit_val(wqe, 40, ccq->shadow_area_pa);
1762         set_64bit_val(wqe, 48,
1763                       (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
1764         set_64bit_val(wqe, 56,
1765                       LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
1766
1767         header = ccq->cq_uk.cq_id |
1768                  LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1769                  LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
1770                  LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
1771                  LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
1772                  LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
1773                  LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1774                  LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1775                  LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
1776                  LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1777                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1778
1779         i40iw_insert_wqe_hdr(wqe, header);
1780
1781         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
1782                         wqe, I40IW_CQP_WQE_SIZE * 8);
1783
1784         if (post_sq) {
1785                 i40iw_sc_cqp_post_sq(cqp);
1786                 ret_code = i40iw_sc_ccq_create_done(ccq);
1787                 if (ret_code)
1788                         return ret_code;
1789         }
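	/* with the CCQ up, SD updates can go through the normal CQP command path */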
1790         cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
1791
1792         return 0;
1793 }
1794
1795 /**
1796  * i40iw_sc_ccq_destroy - destroy ccq during close
1797  * @ccq: ccq sc struct
1798  * @scratch: u64 saved to be used during cqp completion
1799  * @post_sq: flag for cqp db to ring
1800  */
1801 static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
1802                                                    u64 scratch,
1803                                                    bool post_sq)
1804 {
1805         struct i40iw_sc_cqp *cqp;
1806         u64 *wqe;
1807         u64 header;
1808         enum i40iw_status_code ret_code = 0;
1809         u32 tail, val, error;
1810
1811         cqp = ccq->dev->cqp;
1812         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1813         if (!wqe)
1814                 return I40IW_ERR_RING_FULL;
1815         set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
1816         set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
1817         set_64bit_val(wqe, 40, ccq->shadow_area_pa);
1818
1819         header = ccq->cq_uk.cq_id |
1820                  LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1821                  LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
1822                  LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1823                  LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1824                  LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
1825                  LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1826                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1827
1828         i40iw_insert_wqe_hdr(wqe, header);
1829
1830         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
1831                         wqe, I40IW_CQP_WQE_SIZE * 8);
1832
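	/*
	 * The CCQ is the queue that reports CQP completions, so its own
	 * destroy cannot be confirmed through it; snapshot the CQP tail
	 * now and poll the tail register for movement instead.
	 */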
1833         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1834         if (error)
1835                 return I40IW_ERR_CQP_COMPL_ERROR;
1836
1837         if (post_sq) {
1838                 i40iw_sc_cqp_post_sq(cqp);
1839                 ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
1840         }
1841
1842         return ret_code;
1843 }
1844
1845 /**
1846  * i40iw_sc_cq_init - initialize completion q
1847  * @cq: cq struct
1848  * @info: cq initialization info
1849  */
1850 static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
1851                                                struct i40iw_cq_init_info *info)
1852 {
1853         u32 __iomem *cqe_alloc_reg = NULL;
1854         enum i40iw_status_code ret_code;
1855         u32 pble_obj_cnt;
1856         u32 arm_offset;
1857
1858         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1859
1860         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1861                 return I40IW_ERR_INVALID_PBLE_INDEX;
1862
1863         cq->cq_pa = info->cq_base_pa;
1864         cq->dev = info->dev;
1865         cq->ceq_id = info->ceq_id;
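	/* the CQ arm doorbell sits at a PF- or VF-specific offset; resolve its MMIO address for the uk layer */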
1866         arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
1867         if (i40iw_get_hw_addr(cq->dev))
1868                 cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
1869                                               arm_offset);
1870         info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
1871         ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
1872         if (ret_code)
1873                 return ret_code;
1874         cq->virtual_map = info->virtual_map;
1875         cq->pbl_chunk_size = info->pbl_chunk_size;
1876         cq->ceqe_mask = info->ceqe_mask;
1877         cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
1878
1879         cq->shadow_area_pa = info->shadow_area_pa;
1880         cq->shadow_read_threshold = info->shadow_read_threshold;
1881
1882         cq->ceq_id_valid = info->ceq_id_valid;
1883         cq->tph_en = info->tph_en;
1884         cq->tph_val = info->tph_val;
1885
1886         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
1887
1888         return 0;
1889 }
1890
1891 /**
1892  * i40iw_sc_cq_create - create completion q
1893  * @cq: cq struct
1894  * @scratch: u64 saved to be used during cqp completion
1895  * @check_overflow: flag for overflow check
1896  * @post_sq: flag for cqp db to ring
1897  */
1898 static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
1899                                                  u64 scratch,
1900                                                  bool check_overflow,
1901                                                  bool post_sq)
1902 {
1903         u64 *wqe;
1904         struct i40iw_sc_cqp *cqp;
1905         u64 header;
1906
1907         if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
1908                 return I40IW_ERR_INVALID_CQ_ID;
1909
1910         if (cq->ceq_id > I40IW_MAX_CEQID)
1911                 return I40IW_ERR_INVALID_CEQ_ID;
1912
1913         cqp = cq->dev->cqp;
1914         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1915         if (!wqe)
1916                 return I40IW_ERR_RING_FULL;
1917
1918         set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
1919         set_64bit_val(wqe, 8, RS_64_1(cq, 1));
1920         set_64bit_val(wqe,
1921                       16,
1922                       LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
1923
1924         set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
1925
1926         set_64bit_val(wqe, 40, cq->shadow_area_pa);
1927         set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
1928         set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
1929
1930         header = cq->cq_uk.cq_id |
1931                  LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1932                  LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
1933                  LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
1934                  LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
1935                  LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
1936                  LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1937                  LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1938                  LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
1939                  LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1940                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1941
1942         i40iw_insert_wqe_hdr(wqe, header);
1943
1944         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
1945                         wqe, I40IW_CQP_WQE_SIZE * 8);
1946
1947         if (post_sq)
1948                 i40iw_sc_cqp_post_sq(cqp);
1949         return 0;
1950 }
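
/*
 * Hypothetical illustration (not a caller in this file) of the post_sq
 * convention shared by the create/modify/destroy helpers: passing
 * post_sq = false queues the WQE without ringing the CQP doorbell, so
 * several WQEs can be batched and posted with a single doorbell write.
 *
 *	i40iw_sc_cq_create(cq0, scratch0, true, false);
 *	i40iw_sc_cq_create(cq1, scratch1, true, false);
 *	i40iw_sc_cqp_post_sq(cqp);	// one doorbell covers both WQEs
 */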
1951
1952 /**
1953  * i40iw_sc_cq_destroy - destroy completion q
1954  * @cq: cq struct
1955  * @scratch: u64 saved to be used during cqp completion
1956  * @post_sq: flag for cqp db to ring
1957  */
1958 static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
1959                                                   u64 scratch,
1960                                                   bool post_sq)
1961 {
1962         struct i40iw_sc_cqp *cqp;
1963         u64 *wqe;
1964         u64 header;
1965
1966         cqp = cq->dev->cqp;
1967         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1968         if (!wqe)
1969                 return I40IW_ERR_RING_FULL;
1970         set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
1971         set_64bit_val(wqe, 8, RS_64_1(cq, 1));
1972         set_64bit_val(wqe, 40, cq->shadow_area_pa);
1973         set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
1974
1975         header = cq->cq_uk.cq_id |
1976                  LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1977                  LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
1978                  LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
1979                  LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
1980                  LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1981                  LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1982                  LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
1983                  LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1984                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1985
1986         i40iw_insert_wqe_hdr(wqe, header);
1987
1988         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
1989                         wqe, I40IW_CQP_WQE_SIZE * 8);
1990
1991         if (post_sq)
1992                 i40iw_sc_cqp_post_sq(cqp);
1993         return 0;
1994 }
1995
1996 /**
1997  * i40iw_sc_cq_modify - modify completion queue
1998  * @cq: cq struct
1999  * @info: modification info struct
2000  * @scratch: u64 saved to be used during cqp completion
2001  * @post_sq: flag to post to sq
2002  */
2003 static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
2004                                                  struct i40iw_modify_cq_info *info,
2005                                                  u64 scratch,
2006                                                  bool post_sq)
2007 {
2008         struct i40iw_sc_cqp *cqp;
2009         u64 *wqe;
2010         u64 header;
2011         u32 cq_size, ceq_id, first_pm_pbl_idx;
2012         u8 pbl_chunk_size;
2013         bool virtual_map, ceq_id_valid, check_overflow;
2014         u32 pble_obj_cnt;
2015
2016         if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
2017                 return I40IW_ERR_INVALID_CEQ_ID;
2018
2019         pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2020
2021         if (info->cq_resize && info->virtual_map &&
2022             (info->first_pm_pbl_idx >= pble_obj_cnt))
2023                 return I40IW_ERR_INVALID_PBLE_INDEX;
2024
2025         cqp = cq->dev->cqp;
2026         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2027         if (!wqe)
2028                 return I40IW_ERR_RING_FULL;
2029
2030         cq->pbl_list = info->pbl_list;
2031         cq->cq_pa = info->cq_pa;
2032         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2033
2034         cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
2035         if (info->ceq_change) {
2036                 ceq_id_valid = true;
2037                 ceq_id = info->ceq_id;
2038         } else {
2039                 ceq_id_valid = cq->ceq_id_valid;
2040                 ceq_id = ceq_id_valid ? cq->ceq_id : 0;
2041         }
2042         virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
2043         first_pm_pbl_idx = (info->cq_resize ?
2044                             (info->virtual_map ? info->first_pm_pbl_idx : 0) :
2045                             (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2046         pbl_chunk_size = (info->cq_resize ?
2047                           (info->virtual_map ? info->pbl_chunk_size : 0) :
2048                           (cq->virtual_map ? cq->pbl_chunk_size : 0));
2049         check_overflow = info->check_overflow_change ? info->check_overflow :
2050                          cq->check_overflow;
2051         cq->cq_uk.cq_size = cq_size;
2052         cq->ceq_id_valid = ceq_id_valid;
2053         cq->ceq_id = ceq_id;
2054         cq->virtual_map = virtual_map;
2055         cq->first_pm_pbl_idx = first_pm_pbl_idx;
2056         cq->pbl_chunk_size = pbl_chunk_size;
2057         cq->check_overflow = check_overflow;
2058
2059         set_64bit_val(wqe, 0, cq_size);
2060         set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2061         set_64bit_val(wqe, 16,
2062                       LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2063         set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2064         set_64bit_val(wqe, 40, cq->shadow_area_pa);
2065         set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
2066         set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2067
2068         header = cq->cq_uk.cq_id |
2069                  LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
2070                  LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
2071                  LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
2072                  LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2073                  LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2074                  LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2075                  LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2076                  LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2077                  LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2078                  LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2079                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2080
2081         i40iw_insert_wqe_hdr(wqe, header);
2082
2083         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
2084                         wqe, I40IW_CQP_WQE_SIZE * 8);
2085
2086         if (post_sq)
2087                 i40iw_sc_cqp_post_sq(cqp);
2088         return 0;
2089 }
2090
2091 /**
2092  * i40iw_sc_qp_init - initialize qp
2093  * @qp: sc qp
2094  * @info: initialization qp info
2095  */
2096 static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
2097                                                struct i40iw_qp_init_info *info)
2098 {
2099         u32 __iomem *wqe_alloc_reg = NULL;
2100         enum i40iw_status_code ret_code;
2101         u32 pble_obj_cnt;
2102         u8 wqe_size;
2103         u32 offset;
2104
2105         qp->dev = info->pd->dev;
2106         qp->sq_pa = info->sq_pa;
2107         qp->rq_pa = info->rq_pa;
2108         qp->hw_host_ctx_pa = info->host_ctx_pa;
2109         qp->q2_pa = info->q2_pa;
2110         qp->shadow_area_pa = info->shadow_area_pa;
2111
2112         qp->q2_buf = info->q2;
2113         qp->pd = info->pd;
2114         qp->hw_host_ctx = info->host_ctx;
2115         offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
2116         if (i40iw_get_hw_addr(qp->pd->dev))
2117                 wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
2118                                               offset);
2119
2120         info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
2121         ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
2122         if (ret_code)
2123                 return ret_code;
2124         qp->virtual_map = info->virtual_map;
2125
2126         pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2127
2128         if (info->virtual_map && ((info->sq_pa >= pble_obj_cnt) ||
2129                                   (info->rq_pa >= pble_obj_cnt)))
2130                 return I40IW_ERR_INVALID_PBLE_INDEX;
2131
2132         qp->llp_stream_handle = (void *)(-1);
2133         qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
2134
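	/*
	 * The hardware takes ring sizes in encoded form; the RQ depth is
	 * first scaled by its WQE size in I40IW_QP_WQE_MIN_SIZE units
	 * before encoding.
	 */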
2135         qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
2136                                                     false);
2137         i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
2138                     __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
2139         ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
2140                                                &wqe_size);
2141         if (ret_code)
2142                 return ret_code;
2143         qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
2144                                 (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
2145         i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
2146                     "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
2147                     __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
2148         qp->sq_tph_val = info->sq_tph_val;
2149         qp->rq_tph_val = info->rq_tph_val;
2150         qp->sq_tph_en = info->sq_tph_en;
2151         qp->rq_tph_en = info->rq_tph_en;
2152         qp->rcv_tph_en = info->rcv_tph_en;
2153         qp->xmit_tph_en = info->xmit_tph_en;
2154         qp->qs_handle = qp->pd->dev->qs_handle;
2155         qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
2156
2157         return 0;
2158 }
2159
2160 /**
2161  * i40iw_sc_qp_create - create qp
2162  * @qp: sc qp
2163  * @info: qp create info
2164  * @scratch: u64 saved to be used during cqp completion
2165  * @post_sq: flag for cqp db to ring
2166  */
2167 static enum i40iw_status_code i40iw_sc_qp_create(
2168                                 struct i40iw_sc_qp *qp,
2169                                 struct i40iw_create_qp_info *info,
2170                                 u64 scratch,
2171                                 bool post_sq)
2172 {
2173         struct i40iw_sc_cqp *cqp;
2174         u64 *wqe;
2175         u64 header;
2176
2177         if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
2178             (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
2179                 return I40IW_ERR_INVALID_QP_ID;
2180
2181         cqp = qp->pd->dev->cqp;
2182         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2183         if (!wqe)
2184                 return I40IW_ERR_RING_FULL;
2185
2186         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2187
2188         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2189
2190         header = qp->qp_uk.qp_id |
2191                  LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
2192                  LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
2193                  LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2194                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2195                  LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2196                  LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2197                  LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2198                  LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2199                  LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2200                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2201
2202         i40iw_insert_wqe_hdr(wqe, header);
2203         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2204                         wqe, I40IW_CQP_WQE_SIZE * 8);
2205
2206         if (post_sq)
2207                 i40iw_sc_cqp_post_sq(cqp);
2208         return 0;
2209 }
2210
2211 /**
2212  * i40iw_sc_qp_modify - modify qp cqp wqe
2213  * @qp: sc qp
2214  * @info: modify qp info
2215  * @scratch: u64 saved to be used during cqp completion
2216  * @post_sq: flag for cqp db to ring
2217  */
2218 static enum i40iw_status_code i40iw_sc_qp_modify(
2219                                 struct i40iw_sc_qp *qp,
2220                                 struct i40iw_modify_qp_info *info,
2221                                 u64 scratch,
2222                                 bool post_sq)
2223 {
2224         u64 *wqe;
2225         struct i40iw_sc_cqp *cqp;
2226         u64 header;
2227         u8 term_actions = 0;
2228         u8 term_len = 0;
2229
2230         cqp = qp->pd->dev->cqp;
2231         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2232         if (!wqe)
2233                 return I40IW_ERR_RING_FULL;
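	/*
	 * On a transition to TERMINATE, fold the "don't send" flags into
	 * the hardware terminate-action encoding; a terminate length is
	 * only meaningful when a TERM message will actually be sent.
	 */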
2234         if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
2235                 if (info->dont_send_fin)
2236                         term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
2237                 if (info->dont_send_term)
2238                         term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
2239                 if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
2240                     (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
2241                         term_len = info->termlen;
2242         }
2243
2244         set_64bit_val(wqe,
2245                       8,
2246                       LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
2247                       LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
2248
2249         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2250         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2251
2252         header = qp->qp_uk.qp_id |
2253                  LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
2254                  LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
2255                  LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2256                  LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
2257                  LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2258                  LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2259                  LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
2260                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2261                  LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
2262                  LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2263                  LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2264                  LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
2265                  LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
2266                  LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2267                  LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2268                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2269
2270         i40iw_insert_wqe_hdr(wqe, header);
2271
2272         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
2273                         wqe, I40IW_CQP_WQE_SIZE * 8);
2274
2275         if (post_sq)
2276                 i40iw_sc_cqp_post_sq(cqp);
2277         return 0;
2278 }
2279
2280 /**
2281  * i40iw_sc_qp_destroy - cqp destroy qp
2282  * @qp: sc qp
2283  * @scratch: u64 saved to be used during cqp completion
2284  * @remove_hash_idx: flag indicating whether to remove the hash entry
2285  * @ignore_mw_bnd: memory window bind flag
2286  * @post_sq: flag for cqp db to ring
2287  */
2288 static enum i40iw_status_code i40iw_sc_qp_destroy(
2289                                         struct i40iw_sc_qp *qp,
2290                                         u64 scratch,
2291                                         bool remove_hash_idx,
2292                                         bool ignore_mw_bnd,
2293                                         bool post_sq)
2294 {
2295         u64 *wqe;
2296         struct i40iw_sc_cqp *cqp;
2297         u64 header;
2298
2299         cqp = qp->pd->dev->cqp;
2300         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2301         if (!wqe)
2302                 return I40IW_ERR_RING_FULL;
2303         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2304         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2305
2306         header = qp->qp_uk.qp_id |
2307                  LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
2308                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2309                  LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
2310                  LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2311                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2312
2313         i40iw_insert_wqe_hdr(wqe, header);
2314         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2315                         wqe, I40IW_CQP_WQE_SIZE * 8);
2316
2317         if (post_sq)
2318                 i40iw_sc_cqp_post_sq(cqp);
2319         return 0;
2320 }
2321
2322 /**
2323  * i40iw_sc_qp_flush_wqes - flush qp's wqe
2324  * @qp: sc qp
2325  * @info: flush information
2326  * @scratch: u64 saved to be used during cqp completion
2327  * @post_sq: flag for cqp db to ring
2328  */
2329 static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
2330                                 struct i40iw_sc_qp *qp,
2331                                 struct i40iw_qp_flush_info *info,
2332                                 u64 scratch,
2333                                 bool post_sq)
2334 {
2335         u64 temp = 0;
2336         u64 *wqe;
2337         struct i40iw_sc_cqp *cqp;
2338         u64 header;
2339         bool flush_sq = false, flush_rq = false;
2340
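	/*
	 * Each work queue is flushed at most once per QP: only request a
	 * flush for a queue not already flushed, and skip the CQP call
	 * entirely unless an AE must still be generated for an MPA CRC
	 * error.
	 */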
2341         if (info->rq && !qp->flush_rq)
2342                 flush_rq = true;
2343
2344         if (info->sq && !qp->flush_sq)
2345                 flush_sq = true;
2346
2347         qp->flush_sq |= flush_sq;
2348         qp->flush_rq |= flush_rq;
2349         if (!flush_sq && !flush_rq) {
2350                 if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
2351                         return 0;
2352         }
2353
2354         cqp = qp->pd->dev->cqp;
2355         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2356         if (!wqe)
2357                 return I40IW_ERR_RING_FULL;
2358         if (info->userflushcode) {
2359                 if (flush_rq) {
2360                         temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
2361                                 LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
2362                 }
2363                 if (flush_sq) {
2364                         temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
2365                                 LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
2366                 }
2367         }
2368         set_64bit_val(wqe, 16, temp);
2369
2370         temp = (info->generate_ae) ?
2371                 info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
2372
2373         set_64bit_val(wqe, 8, temp);
2374
2375         header = qp->qp_uk.qp_id |
2376                  LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
2377                  LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
2378                  LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
2379                  LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
2380                  LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
2381                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2382
2383         i40iw_insert_wqe_hdr(wqe, header);
2384
2385         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
2386                         wqe, I40IW_CQP_WQE_SIZE * 8);
2387
2388         if (post_sq)
2389                 i40iw_sc_cqp_post_sq(cqp);
2390         return 0;
2391 }
2392
2393 /**
2394  * i40iw_sc_qp_upload_context - upload qp's context
2395  * @dev: sc device struct
2396  * @info: upload context info ptr for return
2397  * @scratch: u64 saved to be used during cqp completion
2398  * @post_sq: flag for cqp db to ring
2399  */
2400 static enum i40iw_status_code i40iw_sc_qp_upload_context(
2401                                         struct i40iw_sc_dev *dev,
2402                                         struct i40iw_upload_context_info *info,
2403                                         u64 scratch,
2404                                         bool post_sq)
2405 {
2406         u64 *wqe;
2407         struct i40iw_sc_cqp *cqp;
2408         u64 header;
2409
2410         cqp = dev->cqp;
2411         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2412         if (!wqe)
2413                 return I40IW_ERR_RING_FULL;
2414         set_64bit_val(wqe, 16, info->buf_pa);
2415
2416         header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
2417                  LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
2418                  LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
2419                  LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
2420                  LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
2421                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2422
2423         i40iw_insert_wqe_hdr(wqe, header);
2424
2425         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
2426                         wqe, I40IW_CQP_WQE_SIZE * 8);
2427
2428         if (post_sq)
2429                 i40iw_sc_cqp_post_sq(cqp);
2430         return 0;
2431 }
2432
2433 /**
2434  * i40iw_sc_qp_setctx - set qp's context
2435  * @qp: sc qp
2436  * @qp_ctx: context ptr
2437  * @info: ctx info
2438  */
2439 static enum i40iw_status_code i40iw_sc_qp_setctx(
2440                                 struct i40iw_sc_qp *qp,
2441                                 u64 *qp_ctx,
2442                                 struct i40iw_qp_host_ctx_info *info)
2443 {
2444         struct i40iwarp_offload_info *iw;
2445         struct i40iw_tcp_offload_info *tcp;
2446         u64 qw0, qw3, qw7 = 0;
2447
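	/*
	 * qw0, qw3 and qw7 accumulate bits from the base QP state plus the
	 * optional iwarp/tcp sections, so they are written into the
	 * context last, once fully assembled.
	 */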
2448         iw = info->iwarp_info;
2449         tcp = info->tcp_info;
2450         qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
2451               LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
2452               LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
2453               LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
2454               LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
2455               LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
2456               LS_64(info->push_idx, I40IWQPC_PPIDX) |
2457               LS_64(info->push_mode_en, I40IWQPC_PMENA);
2458
2459         set_64bit_val(qp_ctx, 8, qp->sq_pa);
2460         set_64bit_val(qp_ctx, 16, qp->rq_pa);
2461
2462         qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2463               LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
2464               LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
2465
2466         set_64bit_val(qp_ctx,
2467                       128,
2468                       LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
2469
2470         set_64bit_val(qp_ctx,
2471                       136,
2472                       LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
2473                       LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
2474
2475         set_64bit_val(qp_ctx,
2476                       168,
2477                       LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
2478         set_64bit_val(qp_ctx,
2479                       176,
2480                       LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
2481                       LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
2482                       LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
2483                       LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
2484
2485         if (info->iwarp_info_valid) {
2486                 qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
2487                        LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
2488
2489                 qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
2490                 set_64bit_val(qp_ctx, 144, qp->q2_pa);
2491                 set_64bit_val(qp_ctx,
2492                               152,
2493                               LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
2494
2495                 /*
2496                  * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e. matching an
2497                  * advertisable IRD of 64
2498                  */
2499                 iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
2500                 set_64bit_val(qp_ctx,
2501                               160,
2502                               LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
2503                               LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
2504                               LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
2505                               LS_64(iw->rd_enable, I40IWQPC_RDOK) |
2506                               LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
2507                               LS_64(iw->bind_en, I40IWQPC_BINDEN) |
2508                               LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
2509                               LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
2510                               LS_64(1, I40IWQPC_IWARPMODE) |
2511                               LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
2512                               LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
2513                               LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
2514                               LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
2515                               LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
2516         }
2517         if (info->tcp_info_valid) {
2518                 qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
2519                        LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
2520                        LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
2521                        LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
2522                        LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
2523                        LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
2524                        LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
2525
2526                 qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
2527                        LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2528                        LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
2529                        LS_64(tcp->tos, I40IWQPC_TOS) |
2530                        LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
2531                        LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
2532
2533                 qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
2534                 set_64bit_val(qp_ctx,
2535                               32,
2536                               LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
2537                               LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
2538
2539                 set_64bit_val(qp_ctx,
2540                               40,
2541                               LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
2542                               LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
2543
2544                 set_64bit_val(qp_ctx,
2545                               48,
2546                               LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
2547                       LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
2548                       LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
2549
2550                 qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
2551                        LS_64(tcp->wscale, I40IWQPC_WSCALE) |
2552                        LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
2553                        LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
2554                        LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
2555                        LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
2556                        LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
2557
2558                 set_64bit_val(qp_ctx,
2559                               72,
2560                               LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
2561                               LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
2562                 set_64bit_val(qp_ctx,
2563                               80,
2564                               LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
2565                               LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
2566
2567                 set_64bit_val(qp_ctx,
2568                               88,
2569                               LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
2570                               LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
2571                 set_64bit_val(qp_ctx,
2572                               96,
2573                               LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
2574                               LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
2575                 set_64bit_val(qp_ctx,
2576                               104,
2577                               LS_64(tcp->srtt, I40IWQPC_SRTT) |
2578                               LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
2579                 set_64bit_val(qp_ctx,
2580                               112,
2581                               LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
2582                               LS_64(tcp->cwnd, I40IWQPC_CWND));
2583                 set_64bit_val(qp_ctx,
2584                               120,
2585                               LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
2586                               LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
2587                 set_64bit_val(qp_ctx,
2588                               128,
2589                               LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
2590                               LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
2591                 set_64bit_val(qp_ctx,
2592                               184,
2593                               LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
2594                               LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
2595                 set_64bit_val(qp_ctx,
2596                               192,
2597                               LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
2598                               LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
2599         }
2600
2601         set_64bit_val(qp_ctx, 0, qw0);
2602         set_64bit_val(qp_ctx, 24, qw3);
2603         set_64bit_val(qp_ctx, 56, qw7);
2604
2605         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
2606                         qp_ctx, I40IW_QP_CTX_SIZE);
2607         return 0;
2608 }
2609
2610 /**
2611  * i40iw_sc_alloc_stag - mr stag alloc
2612  * @dev: sc device struct
2613  * @info: stag info
2614  * @scratch: u64 saved to be used during cqp completion
2615  * @post_sq: flag for cqp db to ring
2616  */
2617 static enum i40iw_status_code i40iw_sc_alloc_stag(
2618                                 struct i40iw_sc_dev *dev,
2619                                 struct i40iw_allocate_stag_info *info,
2620                                 u64 scratch,
2621                                 bool post_sq)
2622 {
2623         u64 *wqe;
2624         struct i40iw_sc_cqp *cqp;
2625         u64 header;
2626
2627         cqp = dev->cqp;
2628         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2629         if (!wqe)
2630                 return I40IW_ERR_RING_FULL;
2631         set_64bit_val(wqe,
2632                       8,
2633                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
2634                       LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
2635         set_64bit_val(wqe,
2636                       16,
2637                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2638         set_64bit_val(wqe,
2639                       40,
2640                       LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
2641
2642         header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2643                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2644                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2645                  LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2646                  LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2647                  LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2648                  LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2649                  LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2650                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2651
2652         i40iw_insert_wqe_hdr(wqe, header);
2653
2654         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
2655                         wqe, I40IW_CQP_WQE_SIZE * 8);
2656
2657         if (post_sq)
2658                 i40iw_sc_cqp_post_sq(cqp);
2659         return 0;
2660 }
2661
2662 /**
2663  * i40iw_sc_mr_reg_non_shared - non-shared mr registration
2664  * @dev: sc device struct
2665  * @info: mr info
2666  * @scratch: u64 saved to be used during cqp completion
2667  * @post_sq: flag for cqp db to ring
2668  */
2669 static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
2670                                 struct i40iw_sc_dev *dev,
2671                                 struct i40iw_reg_ns_stag_info *info,
2672                                 u64 scratch,
2673                                 bool post_sq)
2674 {
2675         u64 *wqe;
2676         u64 temp;
2677         struct i40iw_sc_cqp *cqp;
2678         u64 header;
2679         u32 pble_obj_cnt;
2680         bool remote_access;
2681         u8 addr_type;
2682
2683         if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2684                                    I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2685                 remote_access = true;
2686         else
2687                 remote_access = false;
2688
2689         pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2690
2691         if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
2692                 return I40IW_ERR_INVALID_PBLE_INDEX;
2693
2694         cqp = dev->cqp;
2695         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2696         if (!wqe)
2697                 return I40IW_ERR_RING_FULL;
2698
2699         temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2700         set_64bit_val(wqe, 0, temp);
2701
2702         set_64bit_val(wqe,
2703                       8,
2704                       LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
2705                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2706
2707         set_64bit_val(wqe,
2708                       16,
2709                       LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
2710                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
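	/*
	 * chunk_size == 0 means the region is physically contiguous and is
	 * registered by its base PA; otherwise it is described by a
	 * physical buffer list starting at first_pm_pbl_index.
	 */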
2711         if (!info->chunk_size) {
2712                 set_64bit_val(wqe, 32, info->reg_addr_pa);
2713                 set_64bit_val(wqe, 48, 0);
2714         } else {
2715                 set_64bit_val(wqe, 32, 0);
2716                 set_64bit_val(wqe, 48, info->first_pm_pbl_index);
2717         }
2718         set_64bit_val(wqe, 40, info->hmc_fcn_index);
2719         set_64bit_val(wqe, 56, 0);
2720
2721         addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2722         header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
2723                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2724                  LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2725                  LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2726                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2727                  LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2728                  LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2729                  LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2730                  LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2731                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2732
2733         i40iw_insert_wqe_hdr(wqe, header);
2734
2735         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
2736                         wqe, I40IW_CQP_WQE_SIZE * 8);
2737
2738         if (post_sq)
2739                 i40iw_sc_cqp_post_sq(cqp);
2740         return 0;
2741 }
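
/*
 * Illustrative sketch, not part of the original driver: one way a
 * caller could queue the non-shared registration above through the
 * CQP command path (i40iw_process_cqp_cmd, defined further below in
 * this file).  The i40iw_example_* name is hypothetical, and the field
 * values, including the zero scratch cookie, are assumptions made for
 * illustration only.
 */
static void __maybe_unused i40iw_example_queue_mr_reg(struct i40iw_sc_dev *dev,
						      struct cqp_commands_info *pcmdinfo,
						      struct i40iw_reg_ns_stag_info *mrinfo)
{
	memset(pcmdinfo, 0, sizeof(*pcmdinfo));
	pcmdinfo->cqp_cmd = OP_MR_REG_NON_SHARED;
	pcmdinfo->post_sq = true;
	pcmdinfo->in.u.mr_reg_non_shared.dev = dev;
	pcmdinfo->in.u.mr_reg_non_shared.info = *mrinfo;
	pcmdinfo->in.u.mr_reg_non_shared.scratch = 0;

	/* runs now if the ring has room, else joins cqp_cmd_head */
	i40iw_process_cqp_cmd(dev, pcmdinfo);
}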
2742
2743 /**
2744  * i40iw_sc_mr_reg_shared - register shared memory region
2745  * @dev: sc device struct
2746  * @info: info for shared memory registration
2747  * @scratch: u64 saved to be used during cqp completion
2748  * @post_sq: flag for cqp db to ring
2749  */
2750 static enum i40iw_status_code i40iw_sc_mr_reg_shared(
2751                                         struct i40iw_sc_dev *dev,
2752                                         struct i40iw_register_shared_stag *info,
2753                                         u64 scratch,
2754                                         bool post_sq)
2755 {
2756         u64 *wqe;
2757         struct i40iw_sc_cqp *cqp;
2758         u64 temp, va64, fbo, header;
2759         u32 va32;
2760         bool remote_access;
2761         u8 addr_type;
2762
2763         if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2764                                    I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2765                 remote_access = true;
2766         else
2767                 remote_access = false;
2768         cqp = dev->cqp;
2769         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2770         if (!wqe)
2771                 return I40IW_ERR_RING_FULL;
2772         va64 = (uintptr_t)(info->va);
2773         va32 = (u32)(va64 & 0x00000000FFFFFFFF);
2774         fbo = (u64)(va32 & (4096 - 1));
2775
2776         set_64bit_val(wqe,
2777                       0,
2778                       (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
2779
2780         set_64bit_val(wqe,
2781                       8,
2782                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2783         temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
2784                LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
2785                LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
2786         set_64bit_val(wqe, 16, temp);
2787
2788         addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2789         header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
2790                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2791                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2792                  LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2793                  LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2794                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2795
2796         i40iw_insert_wqe_hdr(wqe, header);
2797
2798         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
2799                         wqe, I40IW_CQP_WQE_SIZE * 8);
2800
2801         if (post_sq)
2802                 i40iw_sc_cqp_post_sq(cqp);
2803         return 0;
2804 }
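
/*
 * Worked example (editorial note, not in the original source) of the
 * fbo math above: for va = 0x7f8012345678, va32 = 0x12345678 and
 * fbo = va32 & (4096 - 1) = 0x678, i.e. the byte offset of the VA
 * within its 4KB page.
 */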
2805
2806 /**
2807  * i40iw_sc_dealloc_stag - deallocate stag
2808  * @dev: sc device struct
2809  * @info: dealloc stag info
2810  * @scratch: u64 saved to be used during cqp completion
2811  * @post_sq: flag for cqp db to ring
2812  */
2813 static enum i40iw_status_code i40iw_sc_dealloc_stag(
2814                                         struct i40iw_sc_dev *dev,
2815                                         struct i40iw_dealloc_stag_info *info,
2816                                         u64 scratch,
2817                                         bool post_sq)
2818 {
2819         u64 header;
2820         u64 *wqe;
2821         struct i40iw_sc_cqp *cqp;
2822
2823         cqp = dev->cqp;
2824         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2825         if (!wqe)
2826                 return I40IW_ERR_RING_FULL;
2827         set_64bit_val(wqe,
2828                       8,
2829                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2830         set_64bit_val(wqe,
2831                       16,
2832                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2833
2834         header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2835                  LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
2836                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2837
2838         i40iw_insert_wqe_hdr(wqe, header);
2839
2840         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
2841                         wqe, I40IW_CQP_WQE_SIZE * 8);
2842
2843         if (post_sq)
2844                 i40iw_sc_cqp_post_sq(cqp);
2845         return 0;
2846 }
2847
2848 /**
2849  * i40iw_sc_query_stag - query hardware for stag
2850  * @dev: sc device struct
2851  * @scratch: u64 saved to be used during cqp completion
2852  * @stag_index: stag index for query
2853  * @post_sq: flag for cqp db to ring
2854  */
2855 static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
2856                                                   u64 scratch,
2857                                                   u32 stag_index,
2858                                                   bool post_sq)
2859 {
2860         u64 header;
2861         u64 *wqe;
2862         struct i40iw_sc_cqp *cqp;
2863
2864         cqp = dev->cqp;
2865         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2866         if (!wqe)
2867                 return I40IW_ERR_RING_FULL;
2868         set_64bit_val(wqe,
2869                       16,
2870                       LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
2871
2872         header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
2873                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2874
2875         i40iw_insert_wqe_hdr(wqe, header);
2876
2877         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
2878                         wqe, I40IW_CQP_WQE_SIZE * 8);
2879
2880         if (post_sq)
2881                 i40iw_sc_cqp_post_sq(cqp);
2882         return 0;
2883 }
2884
2885 /**
2886  * i40iw_sc_mw_alloc - mw allocate
2887  * @dev: sc device struct
2888  * @scratch: u64 saved to be used during cqp completion
2889  * @mw_stag_index: mw stag index
2890  * @pd_id: pd id for this mw
2891  * @post_sq: flag for cqp db to ring
2892  */
2893 static enum i40iw_status_code i40iw_sc_mw_alloc(
2894                                         struct i40iw_sc_dev *dev,
2895                                         u64 scratch,
2896                                         u32 mw_stag_index,
2897                                         u16 pd_id,
2898                                         bool post_sq)
2899 {
2900         u64 header;
2901         struct i40iw_sc_cqp *cqp;
2902         u64 *wqe;
2903
2904         cqp = dev->cqp;
2905         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2906         if (!wqe)
2907                 return I40IW_ERR_RING_FULL;
2908         set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
2909         set_64bit_val(wqe,
2910                       16,
2911                       LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
2912
2913         header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2914                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2915
2916         i40iw_insert_wqe_hdr(wqe, header);
2917
2918         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
2919                         wqe, I40IW_CQP_WQE_SIZE * 8);
2920
2921         if (post_sq)
2922                 i40iw_sc_cqp_post_sq(cqp);
2923         return 0;
2924 }
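
/*
 * Editorial note, not in the original source: window allocation reuses
 * I40IW_CQP_OP_ALLOC_STAG but, unlike i40iw_sc_alloc_stag above, never
 * sets I40IW_CQPSQ_STAG_MR in the header, presumably how the hardware
 * tells a memory window stag apart from a memory region stag.
 */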
2925
2926 /**
2927  * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
2928  * @qp: sc qp struct
2929  * @info: fast mr info
2930  * @post_sq: flag for cqp db to ring
2931  */
2932 enum i40iw_status_code i40iw_sc_mr_fast_register(
2933                                 struct i40iw_sc_qp *qp,
2934                                 struct i40iw_fast_reg_stag_info *info,
2935                                 bool post_sq)
2936 {
2937         u64 temp, header;
2938         u64 *wqe;
2939         u32 wqe_idx;
2940
2941         wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
2942                                          0, info->wr_id);
2943         if (!wqe)
2944                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
2945
2946         i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
2947                     __func__, info->wr_id, wqe_idx,
2948                     &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
2949         temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2950         set_64bit_val(wqe, 0, temp);
2951
2952         temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
2953         set_64bit_val(wqe,
2954                       8,
2955                       LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
2956                       LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
2957
2958         set_64bit_val(wqe,
2959                       16,
2960                       info->total_len |
2961                       LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
2962
2963         header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
2964                  LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
2965                  LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
2966                  LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
2967                  LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
2968                  LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
2969                  LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
2970                  LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
2971                  LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
2972                  LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
2973                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
2974
2975         i40iw_insert_wqe_hdr(wqe, header);
2976
2977         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
2978                         wqe, I40IW_QP_WQE_MIN_SIZE);
2979
2980         if (post_sq)
2981                 i40iw_qp_post_wr(&qp->qp_uk);
2982         return 0;
2983 }
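
/*
 * Illustrative sketch, not part of the original driver: posting a fast
 * register WR with i40iw_sc_mr_fast_register above.  The
 * i40iw_example_* name is hypothetical, and the field values,
 * including the chunk_size = 1 encoding for a one-level PBL at
 * physical address pbl_pa, are assumptions for illustration only.
 */
static enum i40iw_status_code __maybe_unused
i40iw_example_fast_reg(struct i40iw_sc_qp *qp, void *va, u64 pbl_pa,
		       u32 len, u32 stag_idx, u8 stag_key)
{
	struct i40iw_fast_reg_stag_info info;

	memset(&info, 0, sizeof(info));
	info.wr_id = 1;			/* arbitrary completion cookie */
	info.va = va;
	info.total_len = len;
	info.reg_addr_pa = pbl_pa;
	info.chunk_size = 1;		/* assumed: one-level PBL */
	info.first_pm_pbl_index = 0;
	info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
	info.stag_idx = stag_idx;
	info.stag_key = stag_key;
	info.access_rights = I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
			     I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY;
	info.read_fence = true;
	info.signaled = true;		/* request a CQE for this WR */

	return i40iw_sc_mr_fast_register(qp, &info, true);
}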
2984
2985 /**
2986  * i40iw_sc_send_lsmm - send last streaming mode message
2987  * @qp: sc qp struct
2988  * @lsmm_buf: buffer with lsmm message
2989  * @size: size of lsmm buffer
2990  * @stag: stag of lsmm buffer
2991  */
2992 static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
2993                                void *lsmm_buf,
2994                                u32 size,
2995                                i40iw_stag stag)
2996 {
2997         u64 *wqe;
2998         u64 header;
2999         struct i40iw_qp_uk *qp_uk;
3000
3001         qp_uk = &qp->qp_uk;
3002         wqe = qp_uk->sq_base->elem;
3003
3004         set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3005
3006         set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3007
3008         set_64bit_val(wqe, 16, 0);
3009
3010         header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3011                  LS_64(1, I40IWQPSQ_STREAMMODE) |
3012                  LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3013                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3014
3015         i40iw_insert_wqe_hdr(wqe, header);
3016
3017         i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3018                         wqe, I40IW_QP_WQE_MIN_SIZE);
3019 }
3020
3021 /**
3022  * i40iw_sc_send_lsmm_nostag - send lsmm without a stag, for privileged qp
3023  * @qp: sc qp struct
3024  * @lsmm_buf: buffer with lsmm message
3025  * @size: size of lsmm buffer
3026  */
3027 static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3028                                       void *lsmm_buf,
3029                                       u32 size)
3030 {
3031         u64 *wqe;
3032         u64 header;
3033         struct i40iw_qp_uk *qp_uk;
3034
3035         qp_uk = &qp->qp_uk;
3036         wqe = qp_uk->sq_base->elem;
3037
3038         set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3039
3040         set_64bit_val(wqe, 8, size);
3041
3042         set_64bit_val(wqe, 16, 0);
3043
3044         header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3045                  LS_64(1, I40IWQPSQ_STREAMMODE) |
3046                  LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3047                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3048
3049         i40iw_insert_wqe_hdr(wqe, header);
3050
3051         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3052                         wqe, I40IW_QP_WQE_MIN_SIZE);
3053 }
3054
3055 /**
3056  * i40iw_sc_send_rtt - send a zero-length read0 or write0
3057  * @qp: sc qp struct
3058  * @read: Do read0 or write0
3059  */
3060 static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3061 {
3062         u64 *wqe;
3063         u64 header;
3064         struct i40iw_qp_uk *qp_uk;
3065
3066         qp_uk = &qp->qp_uk;
3067         wqe = qp_uk->sq_base->elem;
3068
3069         set_64bit_val(wqe, 0, 0);
3070         set_64bit_val(wqe, 8, 0);
3071         set_64bit_val(wqe, 16, 0);
3072         if (read) {
3073                 header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3074                          LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3075                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3076                 set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3077         } else {
3078                 header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3079                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3080         }
3081
3082         i40iw_insert_wqe_hdr(wqe, header);
3083
3084         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3085                         wqe, I40IW_QP_WQE_MIN_SIZE);
3086 }
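
/*
 * Editorial note, not in the original source: the read0 case above
 * appears to use placeholder values, a remote stag of 0x1234 in the
 * header and 0xabcd << 32 at wqe offset 8; since the read transfers
 * zero bytes, the peer should never dereference either value.
 */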
3087
3088 /**
3089  * i40iw_sc_post_wqe0 - send wqe with opcode
3090  * @qp: sc qp struct
3091  * @opcode: opcode to use for wqe0
3092  */
3093 static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3094 {
3095         u64 *wqe;
3096         u64 header;
3097         struct i40iw_qp_uk *qp_uk;
3098
3099         qp_uk = &qp->qp_uk;
3100         wqe = qp_uk->sq_base->elem;
3101
3102         if (!wqe)
3103                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3104         switch (opcode) {
3105         case I40IWQP_OP_NOP:
3106                 set_64bit_val(wqe, 0, 0);
3107                 set_64bit_val(wqe, 8, 0);
3108                 set_64bit_val(wqe, 16, 0);
3109                 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3110                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3111
3112                 i40iw_insert_wqe_hdr(wqe, header);
3113                 break;
3114         case I40IWQP_OP_RDMA_SEND:
3115                 set_64bit_val(wqe, 0, 0);
3116                 set_64bit_val(wqe, 8, 0);
3117                 set_64bit_val(wqe, 16, 0);
3118                 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3119                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3120                          LS_64(1, I40IWQPSQ_STREAMMODE) |
3121                          LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3122
3123                 i40iw_insert_wqe_hdr(wqe, header);
3124                 break;
3125         default:
3126                 i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3127                             __func__);
3128                 break;
3129         }
3130         return 0;
3131 }
3132
3133 /**
3134  * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3135  * @dev: ptr to i40iw_dev struct
3136  * @hmc_fn_id: hmc function id
3137  */
3138 enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
3139 {
3140         struct i40iw_hmc_info *hmc_info;
3141         struct i40iw_dma_mem query_fpm_mem;
3142         struct i40iw_virt_mem virt_mem;
3143         struct i40iw_vfdev *vf_dev = NULL;
3144         u32 mem_size;
3145         enum i40iw_status_code ret_code = 0;
3146         bool poll_registers = true;
3147         u16 iw_vf_idx;
3148         u8 wait_type;
3149
3150         if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3151             (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3152                 return I40IW_ERR_INVALID_HMCFN_ID;
3153
3154         i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
3155                     dev->hmc_fn_id);
3156         if (hmc_fn_id == dev->hmc_fn_id) {
3157                 hmc_info = dev->hmc_info;
3158                 query_fpm_mem.pa = dev->fpm_query_buf_pa;
3159                 query_fpm_mem.va = dev->fpm_query_buf;
3160         } else {
3161                 vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
3162                 if (!vf_dev)
3163                         return I40IW_ERR_INVALID_VF_ID;
3164
3165                 hmc_info = &vf_dev->hmc_info;
3166                 iw_vf_idx = vf_dev->iw_vf_idx;
3167                 i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
3168                             hmc_info, hmc_info->hmc_obj);
3169                 if (!vf_dev->fpm_query_buf) {
3170                         if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
3171                                 ret_code = i40iw_alloc_query_fpm_buf(dev,
3172                                                                      &dev->vf_fpm_query_buf[iw_vf_idx]);
3173                                 if (ret_code)
3174                                         return ret_code;
3175                         }
3176                         vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
3177                         vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
3178                 }
3179                 query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
3180                 query_fpm_mem.va = vf_dev->fpm_query_buf;
3181                 /*
3182                  * This is hardware specific: this call is made by
3183                  * the PF on behalf of a VF, and
3184                  * i40iw_sc_query_fpm_values needs a ccq poll
3185                  * because the PF ccq is already created.
3186                  */
3187                 poll_registers = false;
3188         }
3189
3190         hmc_info->hmc_fn_id = hmc_fn_id;
3191
3192         if (hmc_fn_id != dev->hmc_fn_id) {
3193                 ret_code =
3194                         i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3195         } else {
3196                 wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3197                             (u8)I40IW_CQP_WAIT_POLL_CQ;
3198
3199                 ret_code = i40iw_sc_query_fpm_values(
3200                                         dev->cqp,
3201                                         0,
3202                                         hmc_info->hmc_fn_id,
3203                                         &query_fpm_mem,
3204                                         true,
3205                                         wait_type);
3206         }
3207         if (ret_code)
3208                 return ret_code;
3209
3210         /* parse the fpm_query_buf and fill hmc obj info */
3211         ret_code =
3212                 i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
3213                                              hmc_info,
3214                                              &dev->hmc_fpm_misc);
3215         if (ret_code)
3216                 return ret_code;
3217         i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
3218                         query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
3219
3220         if (hmc_fn_id != dev->hmc_fn_id) {
3221                 i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3222
3223                 /* parse the fpm_commit_buf and fill hmc obj info */
3224                 i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
3225                 mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3226                            (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
3227                 ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3228                 if (ret_code)
3229                         return ret_code;
3230                 hmc_info->sd_table.sd_entry = virt_mem.va;
3231         }
3232
3233         /* fill size of objects which are fixed */
3234         hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
3235         hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
3236         hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
3237         hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
3238         hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
3239
3240         return ret_code;
3241 }
3242
3243 /**
3244  * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3245  * populates fpm base address in hmc_info
3246  * @dev: ptr to i40iw_dev struct
3247  * @hmc_fn_id: hmc function id
3248  */
3249 static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
3250                                                         u8 hmc_fn_id)
3251 {
3252         struct i40iw_hmc_info *hmc_info;
3253         struct i40iw_hmc_obj_info *obj_info;
3254         u64 *buf;
3255         struct i40iw_dma_mem commit_fpm_mem;
3256         u32 i, j;
3257         enum i40iw_status_code ret_code = 0;
3258         bool poll_registers = true;
3259         u8 wait_type;
3260
3261         if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3262             (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3263                 return I40IW_ERR_INVALID_HMCFN_ID;
3264
3265         if (hmc_fn_id == dev->hmc_fn_id) {
3266                 hmc_info = dev->hmc_info;
3267         } else {
3268                 hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
3269                 poll_registers = false;
3270         }
3271         if (!hmc_info)
3272                 return I40IW_ERR_BAD_PTR;
3273
3274         obj_info = hmc_info->hmc_obj;
3275         buf = dev->fpm_commit_buf;
3276
3277         /* copy cnt values in commit buf */
3278         for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
3279              i++, j += 8)
3280                 set_64bit_val(buf, j, (u64)obj_info[i].cnt);
3281
3282         set_64bit_val(buf, 40, 0);   /* APBVT rsvd */
3283
3284         commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
3285         commit_fpm_mem.va = dev->fpm_commit_buf;
3286         wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3287                         (u8)I40IW_CQP_WAIT_POLL_CQ;
3288         ret_code = i40iw_sc_commit_fpm_values(
3289                                         dev->cqp,
3290                                         0,
3291                                         hmc_info->hmc_fn_id,
3292                                         &commit_fpm_mem,
3293                                         true,
3294                                         wait_type);
3295
3296         /* parse the fpm_commit_buf and fill hmc obj info */
3297         if (!ret_code)
3298                 ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
3299                                                          hmc_info->hmc_obj,
3300                                                          &hmc_info->sd_table.sd_cnt);
3301
3302         i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
3303                         commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
3304
3305         return ret_code;
3306 }
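
/*
 * Worked example (editorial note, not in the original source): the
 * commit buffer above packs the object counts into consecutive 8-byte
 * slots, the first object (QP) at offset 0, the next at offset 8, and
 * so on through PBLE; offset 40 is then cleared again because that
 * slot (APBVT) is reserved.
 */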
3307
3308 /**
3309  * cqp_sds_wqe_fill - fill cqp wqe for sd
3310  * @cqp: struct for cqp hw
3311  * @info: sd info for wqe
3312  * @scratch: u64 saved to be used during cqp completion
3313  */
3314 static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
3315                                                struct i40iw_update_sds_info *info,
3316                                                u64 scratch)
3317 {
3318         u64 data;
3319         u64 header;
3320         u64 *wqe;
3321         int mem_entries, wqe_entries;
3322         struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
3323
3324         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3325         if (!wqe)
3326                 return I40IW_ERR_RING_FULL;
3327
3328         I40IW_CQP_INIT_WQE(wqe);
3329         wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
3330         mem_entries = info->cnt - wqe_entries;
3331
3332         header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
3333                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
3334                  LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
3335
3336         if (mem_entries) {
3337                 memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
3338                 data = sdbuf->pa;
3339         } else {
3340                 data = 0;
3341         }
3342         data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
3343
3344         set_64bit_val(wqe, 16, data);
3345
3346         switch (wqe_entries) {
3347         case 3:
3348                 set_64bit_val(wqe, 48,
3349                               (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3350                                         LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3351
3352                 set_64bit_val(wqe, 56, info->entry[2].data);
3353                 /* fallthrough */
3354         case 2:
3355                 set_64bit_val(wqe, 32,
3356                               (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3357                                         LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3358
3359                 set_64bit_val(wqe, 40, info->entry[1].data);
3360                 /* fallthrough */
3361         case 1:
3362                 set_64bit_val(wqe, 0,
3363                               LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
3364
3365                 set_64bit_val(wqe, 8, info->entry[0].data);
3366                 break;
3367         default:
3368                 break;
3369         }
3370
3371         i40iw_insert_wqe_hdr(wqe, header);
3372
3373         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
3374                         wqe, I40IW_CQP_WQE_SIZE * 8);
3375         return 0;
3376 }
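
/*
 * Worked example (editorial note, not in the original source): with
 * info->cnt = 5, wqe_entries = 3 and mem_entries = 2.  The first three
 * SD entries are written inline at wqe offsets 0/8, 32/40 and 48/56,
 * while entries 3 and 4 are copied to the sdbuf DMA area
 * (2 << 4 = 32 bytes, 16 bytes per entry) whose physical address is
 * handed to the hardware in the data word at wqe offset 16.
 */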
3377
3378 /**
3379  * i40iw_update_pe_sds - cqp wqe for sd
3380  * @dev: ptr to i40iw_dev struct
3381  * @info: sd info for sd's
3382  * @scratch: u64 saved to be used during cqp completion
3383  */
3384 static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3385                                                   struct i40iw_update_sds_info *info,
3386                                                   u64 scratch)
3387 {
3388         struct i40iw_sc_cqp *cqp = dev->cqp;
3389         enum i40iw_status_code ret_code;
3390
3391         ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3392         if (!ret_code)
3393                 i40iw_sc_cqp_post_sq(cqp);
3394
3395         return ret_code;
3396 }
3397
3398 /**
3399  * i40iw_update_sds_noccq - update sd before ccq created
3400  * @dev: sc device struct
3401  * @info: sd info for sd's
3402  */
3403 enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3404                                               struct i40iw_update_sds_info *info)
3405 {
3406         u32 error, val, tail;
3407         struct i40iw_sc_cqp *cqp = dev->cqp;
3408         enum i40iw_status_code ret_code;
3409
3410         ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3411         if (ret_code)
3412                 return ret_code;
3413         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3414         if (error)
3415                 return I40IW_ERR_CQP_COMPL_ERROR;
3416
3417         i40iw_sc_cqp_post_sq(cqp);
3418         ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3419
3420         return ret_code;
3421 }
3422
3423 /**
3424  * i40iw_sc_suspend_qp - suspend qp for param change
3425  * @cqp: struct for cqp hw
3426  * @qp: sc qp struct
3427  * @scratch: u64 saved to be used during cqp completion
3428  */
3429 enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3430                                            struct i40iw_sc_qp *qp,
3431                                            u64 scratch)
3432 {
3433         u64 header;
3434         u64 *wqe;
3435
3436         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3437         if (!wqe)
3438                 return I40IW_ERR_RING_FULL;
3439         header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3440                  LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3441                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3442
3443         i40iw_insert_wqe_hdr(wqe, header);
3444
3445         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3446                         wqe, I40IW_CQP_WQE_SIZE * 8);
3447
3448         i40iw_sc_cqp_post_sq(cqp);
3449         return 0;
3450 }
3451
3452 /**
3453  * i40iw_sc_resume_qp - resume qp after suspend
3454  * @cqp: struct for cqp hw
3455  * @qp: sc qp struct
3456  * @scratch: u64 saved to be used during cqp completion
3457  */
3458 enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3459                                           struct i40iw_sc_qp *qp,
3460                                           u64 scratch)
3461 {
3462         u64 header;
3463         u64 *wqe;
3464
3465         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3466         if (!wqe)
3467                 return I40IW_ERR_RING_FULL;
3468         set_64bit_val(wqe,
3469                       16,
3470                         LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3471
3472         header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3473                  LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3474                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3475
3476         i40iw_insert_wqe_hdr(wqe, header);
3477
3478         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3479                         wqe, I40IW_CQP_WQE_SIZE * 8);
3480
3481         i40iw_sc_cqp_post_sq(cqp);
3482         return 0;
3483 }
3484
3485 /**
3486  * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3487  * @cqp: struct for cqp hw
3488  * @scratch: u64 saved to be used during cqp completion
3489  * @hmc_fn_id: hmc function id
3490  * @post_sq: flag for cqp db to ring
3491  * @poll_registers: flag to poll register for cqp completion
3492  */
3493 enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3494                                         struct i40iw_sc_cqp *cqp,
3495                                         u64 scratch,
3496                                         u8 hmc_fn_id,
3497                                         bool post_sq,
3498                                         bool poll_registers)
3499 {
3500         u64 header;
3501         u64 *wqe;
3502         u32 tail, val, error;
3503         enum i40iw_status_code ret_code = 0;
3504
3505         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3506         if (!wqe)
3507                 return I40IW_ERR_RING_FULL;
3508         set_64bit_val(wqe,
3509                       16,
3510                       LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3511
3512         header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3513                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3514
3515         i40iw_insert_wqe_hdr(wqe, header);
3516
3517         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3518                         wqe, I40IW_CQP_WQE_SIZE * 8);
3519         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3520         if (error) {
3521                 ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3522                 return ret_code;
3523         }
3524         if (post_sq) {
3525                 i40iw_sc_cqp_post_sq(cqp);
3526                 if (poll_registers)
3527                         /* check for cqp sq tail update */
3528                         ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3529                 else
3530                         ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3531                                                                  I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3532                                                                  NULL);
3533         }
3534
3535         return ret_code;
3536 }
3537
3538 /**
3539  * i40iw_ring_full - check if cqp ring is full
3540  * @cqp: struct for cqp hw
3541  */
3542 static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3543 {
3544         return I40IW_RING_FULL_ERR(cqp->sq_ring);
3545 }
3546
3547 /**
3548  * i40iw_est_sd - returns approximate number of SDs for HMC
3549  * @dev: sc device struct
3550  * @hmc_info: hmc structure, size and count for HMC objects
3551  */
3552 static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3553 {
3554         int i;
3555         u64 size = 0;
3556         u64 sd;
3557
3558         for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3559                 size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3560
3561         if (dev->is_pf)
3562                 size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3563
3564         if (size & 0x1FFFFF)
3565                 sd = (size >> 21) + 1; /* add 1 for remainder */
3566         else
3567                 sd = size >> 21;
3568
3569         if (!dev->is_pf) {
3570                 /* 2MB alignment for VF PBLE HMC */
3571                 size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3572                 if (size & 0x1FFFFF)
3573                         sd += (size >> 21) + 1; /* add 1 for remainder */
3574                 else
3575                         sd += size >> 21;
3576         }
3577
3578         return sd;
3579 }
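
/*
 * Worked example (editorial note, not in the original source): each SD
 * covers 2MB (1 << 21 bytes), so a total object size of 0x500000 (5MB)
 * gives sd = (0x500000 >> 21) + 1 = 3; the +1 covers the 1MB remainder
 * flagged by the 0x1FFFFF mask.
 */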
3580
3581 /**
3582  * i40iw_config_fpm_values - configure HMC objects
3583  * @dev: sc device struct
3584  * @qp_count: desired qp count
3585  */
3586 enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
3587 {
3588         struct i40iw_virt_mem virt_mem;
3589         u32 i, mem_size;
3590         u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
3591         u32 powerof2;
3592         u64 sd_needed;
3593         u32 loop_count = 0;
3594
3595         struct i40iw_hmc_info *hmc_info;
3596         struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
3597         enum i40iw_status_code ret_code = 0;
3598
3599         hmc_info = dev->hmc_info;
3600         hmc_fpm_misc = &dev->hmc_fpm_misc;
3601
3602         ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
3603         if (ret_code) {
3604                 i40iw_debug(dev, I40IW_DEBUG_HMC,
3605                             "i40iw_sc_init_iw_hmc returned error_code = %d\n",
3606                             ret_code);
3607                 return ret_code;
3608         }
3609
3610         for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
3611                 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
3612         sd_needed = i40iw_est_sd(dev, hmc_info);
3613         i40iw_debug(dev, I40IW_DEBUG_HMC,
3614                     "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
3615                     __func__, sd_needed, hmc_info->first_sd_index);
3616         i40iw_debug(dev, I40IW_DEBUG_HMC,
3617                     "%s: sd count %d where max sd is %d\n",
3618                     __func__, hmc_info->sd_table.sd_cnt,
3619                     hmc_fpm_misc->max_sds);
3620
3621         qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
3622         qpwantedoriginal = qpwanted;
3623         mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
3624         pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
3625
3626         i40iw_debug(dev, I40IW_DEBUG_HMC,
3627                     "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
3628                     qp_count, hmc_fpm_misc->max_sds,
3629                     hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
3630                     hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
3631                     hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
3632                     hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
3633
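        /*
         * Editorial note, not in the original source: the loop below
         * walks the wanted counts downward (QPs toward a smaller power
         * of two, MRs and PBLEs in FPM_MULTIPLIER-sized steps) until
         * the estimated SD count fits in max_sds or 2000 iterations
         * have run.
         */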
3634         do {
3635                 ++loop_count;
3636                 hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
3637                 hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
3638                         min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
3639                 hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
3640                 hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
3641                                         qpwanted * hmc_fpm_misc->ht_multiplier;
3642                 hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
3643                         hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
3644                 hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
3645                 hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
3646
3647                 hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
3648                         roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
3649                 hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
3650                         roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
3651                 hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
3652                         hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
3653                 hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
3654                         hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
3655                 hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
3656                         ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
3657                 hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
3658                 hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
3659                 hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
3660
3661                 /* How much memory is needed for all the objects. */
3662                 sd_needed = i40iw_est_sd(dev, hmc_info);
3663                 if ((loop_count > 1000) ||
3664                     ((!(loop_count % 10)) &&
3665                     (qpwanted > qpwantedoriginal * 2 / 3))) {
3666                         if (qpwanted > FPM_MULTIPLIER) {
3667                                 qpwanted -= FPM_MULTIPLIER;
3668                                 powerof2 = 1;
3669                                 while (powerof2 < qpwanted)
3670                                         powerof2 *= 2;
3671                                 powerof2 /= 2;
3672                                 qpwanted = powerof2;
3673                         } else {
3674                                 qpwanted /= 2;
3675                         }
3676                 }
3677                 if (mrwanted > FPM_MULTIPLIER * 10)
3678                         mrwanted -= FPM_MULTIPLIER * 10;
3679                 if (pblewanted > FPM_MULTIPLIER * 1000)
3680                         pblewanted -= FPM_MULTIPLIER * 1000;
3681         } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
3682
3683         sd_needed = i40iw_est_sd(dev, hmc_info);
3684
3685         i40iw_debug(dev, I40IW_DEBUG_HMC,
3686                     "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
3687                     loop_count, sd_needed,
3688                     hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
3689                     hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
3690                     hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
3691                     hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
3692
3693         ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
3694         if (ret_code) {
3695                 i40iw_debug(dev, I40IW_DEBUG_HMC,
3696                             "configure_iw_fpm returned error_code[x%08X]\n",
3697                             i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
3698                 return ret_code;
3699         }
3700
3701         mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3702                    (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
3703         ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3704         if (ret_code) {
3705                 i40iw_debug(dev, I40IW_DEBUG_HMC,
3706                             "%s: failed to allocate memory for sd_entry buffer\n",
3707                             __func__);
3708                 return ret_code;
3709         }
3710         hmc_info->sd_table.sd_entry = virt_mem.va;
3711
3712         return ret_code;
3713 }
3714
3715 /**
3716  * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
3717  * @dev: rdma device
3718  * @pcmdinfo: cqp command info
3719  */
3720 static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
3721                                                  struct cqp_commands_info *pcmdinfo)
3722 {
3723         enum i40iw_status_code status;
3724         struct i40iw_dma_mem values_mem;
3725
3726         dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
3727         switch (pcmdinfo->cqp_cmd) {
3728         case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
3729                 status = i40iw_sc_del_local_mac_ipaddr_entry(
3730                                 pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
3731                                 pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
3732                                 pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
3733                                 pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
3734                                 pcmdinfo->post_sq);
3735                 break;
3736         case OP_CEQ_DESTROY:
3737                 status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
3738                                               pcmdinfo->in.u.ceq_destroy.scratch,
3739                                               pcmdinfo->post_sq);
3740                 break;
3741         case OP_AEQ_DESTROY:
3742                 status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
3743                                               pcmdinfo->in.u.aeq_destroy.scratch,
3744                                               pcmdinfo->post_sq);
3745
3746                 break;
3747         case OP_DELETE_ARP_CACHE_ENTRY:
3748                 status = i40iw_sc_del_arp_cache_entry(
3749                                 pcmdinfo->in.u.del_arp_cache_entry.cqp,
3750                                 pcmdinfo->in.u.del_arp_cache_entry.scratch,
3751                                 pcmdinfo->in.u.del_arp_cache_entry.arp_index,
3752                                 pcmdinfo->post_sq);
3753                 break;
3754         case OP_MANAGE_APBVT_ENTRY:
3755                 status = i40iw_sc_manage_apbvt_entry(
3756                                 pcmdinfo->in.u.manage_apbvt_entry.cqp,
3757                                 &pcmdinfo->in.u.manage_apbvt_entry.info,
3758                                 pcmdinfo->in.u.manage_apbvt_entry.scratch,
3759                                 pcmdinfo->post_sq);
3760                 break;
3761         case OP_CEQ_CREATE:
3762                 status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
3763                                              pcmdinfo->in.u.ceq_create.scratch,
3764                                              pcmdinfo->post_sq);
3765                 break;
3766         case OP_AEQ_CREATE:
3767                 status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
3768                                              pcmdinfo->in.u.aeq_create.scratch,
3769                                              pcmdinfo->post_sq);
3770                 break;
3771         case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
3772                 status = i40iw_sc_alloc_local_mac_ipaddr_entry(
3773                                 pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
3774                                 pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
3775                                 pcmdinfo->post_sq);
3776                 break;
3777         case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
3778                 status = i40iw_sc_add_local_mac_ipaddr_entry(
3779                                 pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
3780                                 &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
3781                                 pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
3782                                 pcmdinfo->post_sq);
3783                 break;
3784         case OP_MANAGE_QHASH_TABLE_ENTRY:
3785                 status = i40iw_sc_manage_qhash_table_entry(
3786                                 pcmdinfo->in.u.manage_qhash_table_entry.cqp,
3787                                 &pcmdinfo->in.u.manage_qhash_table_entry.info,
3788                                 pcmdinfo->in.u.manage_qhash_table_entry.scratch,
3789                                 pcmdinfo->post_sq);
3790
3791                 break;
3792         case OP_QP_MODIFY:
3793                 status = i40iw_sc_qp_modify(
3794                                 pcmdinfo->in.u.qp_modify.qp,
3795                                 &pcmdinfo->in.u.qp_modify.info,
3796                                 pcmdinfo->in.u.qp_modify.scratch,
3797                                 pcmdinfo->post_sq);
3798
3799                 break;
3800         case OP_QP_UPLOAD_CONTEXT:
3801                 status = i40iw_sc_qp_upload_context(
3802                                 pcmdinfo->in.u.qp_upload_context.dev,
3803                                 &pcmdinfo->in.u.qp_upload_context.info,
3804                                 pcmdinfo->in.u.qp_upload_context.scratch,
3805                                 pcmdinfo->post_sq);
3806
3807                 break;
3808         case OP_CQ_CREATE:
3809                 status = i40iw_sc_cq_create(
3810                                 pcmdinfo->in.u.cq_create.cq,
3811                                 pcmdinfo->in.u.cq_create.scratch,
3812                                 pcmdinfo->in.u.cq_create.check_overflow,
3813                                 pcmdinfo->post_sq);
3814                 break;
3815         case OP_CQ_DESTROY:
3816                 status = i40iw_sc_cq_destroy(
3817                                 pcmdinfo->in.u.cq_destroy.cq,
3818                                 pcmdinfo->in.u.cq_destroy.scratch,
3819                                 pcmdinfo->post_sq);
3820
3821                 break;
3822         case OP_QP_CREATE:
3823                 status = i40iw_sc_qp_create(
3824                                 pcmdinfo->in.u.qp_create.qp,
3825                                 &pcmdinfo->in.u.qp_create.info,
3826                                 pcmdinfo->in.u.qp_create.scratch,
3827                                 pcmdinfo->post_sq);
3828                 break;
3829         case OP_QP_DESTROY:
3830                 status = i40iw_sc_qp_destroy(
3831                                 pcmdinfo->in.u.qp_destroy.qp,
3832                                 pcmdinfo->in.u.qp_destroy.scratch,
3833                                 pcmdinfo->in.u.qp_destroy.remove_hash_idx,
3834                                 pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
3836                                 pcmdinfo->post_sq);
3837
3838                 break;
3839         case OP_ALLOC_STAG:
3840                 status = i40iw_sc_alloc_stag(
3841                                 pcmdinfo->in.u.alloc_stag.dev,
3842                                 &pcmdinfo->in.u.alloc_stag.info,
3843                                 pcmdinfo->in.u.alloc_stag.scratch,
3844                                 pcmdinfo->post_sq);
3845                 break;
3846         case OP_MR_REG_NON_SHARED:
3847                 status = i40iw_sc_mr_reg_non_shared(
3848                                 pcmdinfo->in.u.mr_reg_non_shared.dev,
3849                                 &pcmdinfo->in.u.mr_reg_non_shared.info,
3850                                 pcmdinfo->in.u.mr_reg_non_shared.scratch,
3851                                 pcmdinfo->post_sq);
3852
3853                 break;
3854         case OP_DEALLOC_STAG:
3855                 status = i40iw_sc_dealloc_stag(
3856                                 pcmdinfo->in.u.dealloc_stag.dev,
3857                                 &pcmdinfo->in.u.dealloc_stag.info,
3858                                 pcmdinfo->in.u.dealloc_stag.scratch,
3859                                 pcmdinfo->post_sq);
3860
3861                 break;
3862         case OP_MW_ALLOC:
3863                 status = i40iw_sc_mw_alloc(
3864                                 pcmdinfo->in.u.mw_alloc.dev,
3865                                 pcmdinfo->in.u.mw_alloc.scratch,
3866                                 pcmdinfo->in.u.mw_alloc.mw_stag_index,
3867                                 pcmdinfo->in.u.mw_alloc.pd_id,
3868                                 pcmdinfo->post_sq);
3869
3870                 break;
3871         case OP_QP_FLUSH_WQES:
3872                 status = i40iw_sc_qp_flush_wqes(
3873                                 pcmdinfo->in.u.qp_flush_wqes.qp,
3874                                 &pcmdinfo->in.u.qp_flush_wqes.info,
3875                                 pcmdinfo->in.u.qp_flush_wqes.scratch,
3876                                 pcmdinfo->post_sq);
3877                 break;
3878         case OP_ADD_ARP_CACHE_ENTRY:
3879                 status = i40iw_sc_add_arp_cache_entry(
3880                                 pcmdinfo->in.u.add_arp_cache_entry.cqp,
3881                                 &pcmdinfo->in.u.add_arp_cache_entry.info,
3882                                 pcmdinfo->in.u.add_arp_cache_entry.scratch,
3883                                 pcmdinfo->post_sq);
3884                 break;
3885         case OP_MANAGE_PUSH_PAGE:
3886                 status = i40iw_sc_manage_push_page(
3887                                 pcmdinfo->in.u.manage_push_page.cqp,
3888                                 &pcmdinfo->in.u.manage_push_page.info,
3889                                 pcmdinfo->in.u.manage_push_page.scratch,
3890                                 pcmdinfo->post_sq);
3891                 break;
3892         case OP_UPDATE_PE_SDS:
3893                 /* case I40IW_CQP_OP_UPDATE_PE_SDS */
3894                 status = i40iw_update_pe_sds(
3895                                 pcmdinfo->in.u.update_pe_sds.dev,
3896                                 &pcmdinfo->in.u.update_pe_sds.info,
3897                                 pcmdinfo->in.u.update_pe_sds.scratch);
3899
3900                 break;
3901         case OP_MANAGE_HMC_PM_FUNC_TABLE:
3902                 status = i40iw_sc_manage_hmc_pm_func_table(
3903                                 pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
3904                                 pcmdinfo->in.u.manage_hmc_pm.scratch,
3905                                 (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
3906                                 pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
3907                                 true);
3908                 break;
3909         case OP_SUSPEND:
3910                 status = i40iw_sc_suspend_qp(
3911                                 pcmdinfo->in.u.suspend_resume.cqp,
3912                                 pcmdinfo->in.u.suspend_resume.qp,
3913                                 pcmdinfo->in.u.suspend_resume.scratch);
3914                 break;
3915         case OP_RESUME:
3916                 status = i40iw_sc_resume_qp(
3917                                 pcmdinfo->in.u.suspend_resume.cqp,
3918                                 pcmdinfo->in.u.suspend_resume.qp,
3919                                 pcmdinfo->in.u.suspend_resume.scratch);
3920                 break;
3921         case OP_MANAGE_VF_PBLE_BP:
3922                 status = i40iw_manage_vf_pble_bp(
3923                                 pcmdinfo->in.u.manage_vf_pble_bp.cqp,
3924                                 &pcmdinfo->in.u.manage_vf_pble_bp.info,
3925                                 pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
3926                 break;
3927         case OP_QUERY_FPM_VALUES:
3928                 values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
3929                 values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
3930                 status = i40iw_sc_query_fpm_values(
3931                                 pcmdinfo->in.u.query_fpm_values.cqp,
3932                                 pcmdinfo->in.u.query_fpm_values.scratch,
3933                                 pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
3934                                 &values_mem, true, I40IW_CQP_WAIT_EVENT);
3935                 break;
3936         case OP_COMMIT_FPM_VALUES:
3937                 values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
3938                 values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
3939                 status = i40iw_sc_commit_fpm_values(
3940                                 pcmdinfo->in.u.commit_fpm_values.cqp,
3941                                 pcmdinfo->in.u.commit_fpm_values.scratch,
3942                                 pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
3943                                 &values_mem,
3944                                 true,
3945                                 I40IW_CQP_WAIT_EVENT);
3946                 break;
3947         default:
3948                 status = I40IW_NOT_SUPPORTED;
3949                 break;
3950         }
3951
3952         return status;
3953 }
3954
3955 /**
3956  * i40iw_process_cqp_cmd - process all cqp commands
3957  * @dev: sc device struct
3958  * @pcmdinfo: cqp command info
3959  */
3960 enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
3961                                              struct cqp_commands_info *pcmdinfo)
3962 {
3963         enum i40iw_status_code status = 0;
3964         unsigned long   flags;
3965
3966         spin_lock_irqsave(&dev->cqp_lock, flags);
3967         if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
3968                 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
3969         else
3970                 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
3971         spin_unlock_irqrestore(&dev->cqp_lock, flags);
3972         return status;
3973 }
3974
3975 /**
3976  * i40iw_process_bh - called from tasklet for cqp list
3977  * @dev: sc device struct
3978  */
3979 enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
3980 {
3981         enum i40iw_status_code status = 0;
3982         struct cqp_commands_info *pcmdinfo;
3983         unsigned long   flags;
3984
3985         spin_lock_irqsave(&dev->cqp_lock, flags);
3986         while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
3987                 pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
3988
3989                 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
3990                 if (status)
3991                         break;
3992         }
3993         spin_unlock_irqrestore(&dev->cqp_lock, flags);
3994         return status;
3995 }
3996
3997 /**
3998  * i40iw_iwarp_opcode - get the iwarp opcode of the offending packet
3999  * @info: aeq info for the packet
4000  * @pkt: packet for error
4001  */
4002 static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4003 {
4004         __be16 *mpa;
4005         u32 opcode = 0xffffffff;
4006
4007         if (info->q2_data_written) {
4008                 mpa = (__be16 *)pkt;
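                /* after the 2-byte MPA length in mpa[0], mpa[1] carries the
                 * DDP and RDMAP control bytes; the RDMAP opcode sits in the
                 * low nibble
                 */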
4009                 opcode = ntohs(mpa[1]) & 0xf;
4010         }
4011         return opcode;
4012 }
4013
4014 /**
4015  * i40iw_locate_mpa - return pointer to mpa in the pkt
4016  * @pkt: packet with data
4017  */
4018 static u8 *i40iw_locate_mpa(u8 *pkt)
4019 {
4020         /* skip over ethernet header */
4021         pkt += I40IW_MAC_HLEN;
4022
4023         /* Skip over IP and TCP headers */
4024         pkt += 4 * (pkt[0] & 0x0f);
4025         pkt += 4 * ((pkt[12] >> 4) & 0x0f);
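        /* e.g. a frame with no IP or TCP options skips
         * 14 + 20 + 20 = 54 bytes before the MPA header
         */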
4026         return pkt;
4027 }
4028
4029 /**
4030  * i40iw_setup_termhdr - set up terminate message header
4031  * @qp: sc qp ptr for pkt
4032  * @hdr: term hdr
4033  * @opcode: flush opcode for termhdr
4034  * @layer_etype: error layer + error type
4035  * @err: error code in the header
4036  */
4037 static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4038                                 struct i40iw_terminate_hdr *hdr,
4039                                 enum i40iw_flush_opcode opcode,
4040                                 u8 layer_etype,
4041                                 u8 err)
4042 {
4043         qp->flush_code = opcode;
4044         hdr->layer_etype = layer_etype;
4045         hdr->error_code = err;
4046 }
4047
4048 /**
4049  * i40iw_bld_terminate_hdr - build terminate message header
4050  * @qp: qp associated with received terminate AE
4051  * @info: the struct containing AE information
4052  */
4053 static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
4054                                    struct i40iw_aeqe_info *info)
4055 {
4056         u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4057         u16 ddp_seg_len;
4058         int copy_len = 0;
4059         u8 is_tagged = 0;
4061         u32 opcode;
4062         struct i40iw_terminate_hdr *termhdr;
4063
4064         termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
4065         memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
4066
4067         if (info->q2_data_written) {
4068                 /* Use data from offending packet to fill in ddp & rdma hdrs */
4069                 pkt = i40iw_locate_mpa(pkt);
4070                 ddp_seg_len = ntohs(*(__be16 *)pkt);
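                /* pkt now points at the 2-byte MPA length field; byte 2 is
                 * the DDP control byte whose top bit flags a tagged-buffer
                 * message
                 */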
4071                 if (ddp_seg_len) {
4072                         copy_len = 2;
4073                         termhdr->hdrct = DDP_LEN_FLAG;
4074                         if (pkt[2] & 0x80) {
4075                                 is_tagged = 1;
4076                                 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
4077                                         copy_len += TERM_DDP_LEN_TAGGED;
4078                                         termhdr->hdrct |= DDP_HDR_FLAG;
4079                                 }
4080                         } else {
4081                                 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
4082                                         copy_len += TERM_DDP_LEN_UNTAGGED;
4083                                         termhdr->hdrct |= DDP_HDR_FLAG;
4084                                 }
4085
4086                                 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
4087                                         if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
4088                                                 copy_len += TERM_RDMA_LEN;
4089                                                 termhdr->hdrct |= RDMA_HDR_FLAG;
4090                                         }
4091                                 }
4092                         }
4093                 }
4094         }
4095
4096         opcode = i40iw_iwarp_opcode(info, pkt);
4097
4098         switch (info->ae_id) {
4099         case I40IW_AE_AMP_UNALLOCATED_STAG:
4100                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4101                 if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
4102                         i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4103                                             (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
4104                 else
4105                         i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4106                                             (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4107                 break;
4108         case I40IW_AE_AMP_BOUNDS_VIOLATION:
4109                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4110                 if (info->q2_data_written)
4111                         i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4112                                             (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
4113                 else
4114                         i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4115                                             (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
4116                 break;
4117         case I40IW_AE_AMP_BAD_PD:
4118                 switch (opcode) {
4119                 case I40IW_OP_TYPE_RDMA_WRITE:
4120                         i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4121                                             (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
4122                         break;
4123                 case I40IW_OP_TYPE_SEND_INV:
4124                 case I40IW_OP_TYPE_SEND_SOL_INV:
4125                         i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4126                                             (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
4127                         break;
4128                 default:
4129                         i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4130                                             (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
4131                 }
4132                 break;
4133         case I40IW_AE_AMP_INVALID_STAG:
4134                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4135                 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4136                                     (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4137                 break;
4138         case I40IW_AE_AMP_BAD_QP:
4139                 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4140                                     (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4141                 break;
4142         case I40IW_AE_AMP_BAD_STAG_KEY:
4143         case I40IW_AE_AMP_BAD_STAG_INDEX:
4144                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4145                 switch (opcode) {
4146                 case I40IW_OP_TYPE_SEND_INV:
4147                 case I40IW_OP_TYPE_SEND_SOL_INV:
4148                         i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4149                                             (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
4150                         break;
4151                 default:
4152                         i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4153                                             (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
4154                 }
4155                 break;
4156         case I40IW_AE_AMP_RIGHTS_VIOLATION:
4157         case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
4158         case I40IW_AE_PRIV_OPERATION_DENIED:
4159                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4160                 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4161                                     (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
4162                 break;
4163         case I40IW_AE_AMP_TO_WRAP:
4164                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4165                 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4166                                     (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
4167                 break;
4168         case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
4169                 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4170                                     (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
4171                 break;
4172         case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4173                 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4174                                     (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
4175                 break;
4176         case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
4177         case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
4178                 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4179                                     (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4180                 break;
4181         case I40IW_AE_LCE_QP_CATASTROPHIC:
4182         case I40IW_AE_DDP_NO_L_BIT:
4183                 i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4184                                     (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4185                 break;
4186         case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
4187         case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
4188                 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4189                                     (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
4190                 break;
4191         case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
4192                 qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4193                 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4194                                     (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
4195                 break;
4196         case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
4197                 if (is_tagged)
4198                         i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4199                                             (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
4200                 else
4201                         i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4202                                             (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
4203                 break;
4204         case I40IW_AE_DDP_UBE_INVALID_MO:
4205                 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4206                                     (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
4207                 break;
4208         case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
4209                 i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4210                                     (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
4211                 break;
4212         case I40IW_AE_DDP_UBE_INVALID_QN:
4213                 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4214                                     (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4215                 break;
4216         case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4217                 i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4218                                     (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
4219                 break;
4220         case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4221                 i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4222                                     (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
4223                 break;
4224         default:
4225                 i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4226                                     (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
4227                 break;
4228         }
4229
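        /* per the RFC 5040 terminate message format, the flagged MPA/DDP/
         * RDMAP header bytes of the offending frame follow immediately
         * after the terminate header
         */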
4230         if (copy_len)
4231                 memcpy(termhdr + 1, pkt, copy_len);
4232
4233         if (qp->flush_code && !info->in_rdrsp_wr)
4234                 qp->sq_flush = info->sq;
4235
4236         return sizeof(struct i40iw_terminate_hdr) + copy_len;
4237 }
4238
4239 /**
4240  * i40iw_terminate_send_fin() - Send fin for terminate message
4241  * @qp: qp associated with received terminate AE
4242  */
4243 void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4244 {
4245         /* Send the fin only */
4246         i40iw_term_modify_qp(qp,
4247                              I40IW_QP_STATE_TERMINATE,
4248                              I40IWQP_TERM_SEND_FIN_ONLY,
4249                              0);
4250 }
4251
4252 /**
4253  * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
4254  * @qp: qp associated with received terminate AE
4255  * @info: the struct containing AE information
4256  */
4257 void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4258 {
4259         u8 termlen = 0;
4260
4261         if (qp->term_flags & I40IW_TERM_SENT)
4262                 return;         /* Sanity check */
4263
4264         /* Eventtype can change from bld_terminate_hdr */
4265         qp->eventtype = TERM_EVENT_QP_FATAL;
4266         termlen = i40iw_bld_terminate_hdr(qp, info);
4267         i40iw_terminate_start_timer(qp);
4268         qp->term_flags |= I40IW_TERM_SENT;
4269         i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
4270                              I40IWQP_TERM_SEND_TERM_ONLY, termlen);
4271 }
4272
4273 /**
4274  * i40iw_terminate_received - handle terminate received AE
4275  * @qp: qp associated with received terminate AE
4276  * @info: the struct containing AE information
4277  */
4278 void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4279 {
4280         u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4281         __be32 *mpa;
4282         u8 ddp_ctl;
4283         u8 rdma_ctl;
4284         u16 aeq_id = 0;
4285         struct i40iw_terminate_hdr *termhdr;
4286
4287         mpa = (__be32 *)i40iw_locate_mpa(pkt);
4288         if (info->q2_data_written) {
4289                 /* did not validate the frame - do it now */
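                /* a terminate frame must arrive as an untagged, last-fragment
                 * DDP segment with DDP version 1, on queue number 2 (the
                 * terminate queue), with MSN 1 and MO 0, and with RDMAP
                 * version 1 in the top bits of the RDMAP control byte;
                 * anything else maps to a local AE code below
                 */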
4290                 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
4291                 rdma_ctl = ntohl(mpa[0]) & 0xff;
4292                 if ((ddp_ctl & 0xc0) != 0x40)
4293                         aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
4294                 else if ((ddp_ctl & 0x03) != 1)
4295                         aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
4296                 else if (ntohl(mpa[2]) != 2)
4297                         aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
4298                 else if (ntohl(mpa[3]) != 1)
4299                         aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
4300                 else if (ntohl(mpa[4]) != 0)
4301                         aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
4302                 else if ((rdma_ctl & 0xc0) != 0x40)
4303                         aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
4304
4305                 info->ae_id = aeq_id;
4306                 if (info->ae_id) {
4307                         /* Bad terminate recvd - send back a terminate */
4308                         i40iw_terminate_connection(qp, info);
4309                         return;
4310                 }
4311         }
4312
4313         qp->term_flags |= I40IW_TERM_RCVD;
4314         qp->eventtype = TERM_EVENT_QP_FATAL;
4315         termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
4316         if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
4317             termhdr->layer_etype == RDMAP_REMOTE_OP) {
4318                 i40iw_terminate_done(qp, 0);
4319         } else {
4320                 i40iw_terminate_start_timer(qp);
4321                 i40iw_terminate_send_fin(qp);
4322         }
4323 }
4324
4325 /**
4326  * i40iw_hw_stat_init - initialize HW stats table
4327  * @devstat: pestat struct
4328  * @fcn_idx: PCI fn id
4329  * @hw: PF i40iw_hw structure.
4330  * @is_pf: Is it a PF?
4331  *
4332  * Populate the HW stat table with the register offset address for
4333  * each stat and take an initial snapshot of the counter values.
4334  */
4335 static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
4336                                u8 fcn_idx,
4337                                struct i40iw_hw *hw, bool is_pf)
4338 {
4339         u32 stat_reg_offset;
4340         u32 stat_index;
4341         struct i40iw_dev_hw_stat_offsets *stat_table =
4342                 &devstat->hw_stat_offsets;
4343         struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
4344
4345         devstat->hw = hw;
4346
4347         if (is_pf) {
4348                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4349                                 I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
4350                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4351                                 I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
4352                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4353                                 I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
4354                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4355                                 I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
4356                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4357                                 I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
4358                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4359                                 I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
4360                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4361                                 I40E_GLPES_PFTCPRTXSEG(fcn_idx);
4362                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4363                                 I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
4364                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4365                                 I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4366
4367                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4368                                 I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
4369                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4370                                 I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
4371                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4372                                 I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
4373                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4374                                 I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
4375                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4376                                 I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
4377                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4378                                 I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
4379                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4380                                 I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
4381                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4382                                 I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
4383                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4384                                 I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
4385                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4386                                 I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
4387                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4388                                 I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
4389                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4390                                 I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
4391                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4392                                 I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
4393                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4394                                 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4397                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4398                                 I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
4399                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4400                                 I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
4401                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4402                                 I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
4403                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4404                                 I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
4405                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4406                                 I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
4407                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4408                                 I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
4409                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4410                                 I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
4411                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4412                                 I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
4413                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4414                                 I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
4415                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4416                                 I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
4417                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4418                                 I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4419         } else {
4420                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4421                                 I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
4422                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4423                                 I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
4424                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4425                                 I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
4426                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4427                                 I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
4428                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4429                                 I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
4430                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4431                                 I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
4432                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4433                                 I40E_GLPES_VFTCPRTXSEG(fcn_idx);
4434                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4435                                 I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
4436                 stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4437                                 I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4438
4439                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4440                                 I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
4441                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4442                                 I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
4443                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4444                                 I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
4445                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4446                                 I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
4447                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4448                                 I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
4449                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4450                                 I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
4451                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4452                                 I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
4453                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4454                                 I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
4455                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4456                                 I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
4457                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4458                                 I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
4459                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4460                                 I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
4461                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4462                                 I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
4463                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4464                                 I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
4465                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4466                                 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4469                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4470                                 I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
4471                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4472                                 I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
4473                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4474                                 I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
4475                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4476                                 I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
4477                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4478                                 I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
4479                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4480                                 I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
4481                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4482                                 I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
4483                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4484                                 I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
4485                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4486                                 I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
4487                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4488                                 I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
4489                 stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4490                                 I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4491         }
4492
4493         for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
4494              stat_index++) {
4495                 stat_reg_offset = stat_table->stat_offset_64[stat_index];
4496                 last_rd_stats->stat_value_64[stat_index] =
4497                         readq(devstat->hw->hw_addr + stat_reg_offset);
4498         }
4499
4500         for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
4501              stat_index++) {
4502                 stat_reg_offset = stat_table->stat_offset_32[stat_index];
4503                 last_rd_stats->stat_value_32[stat_index] =
4504                         i40iw_rd32(devstat->hw, stat_reg_offset);
4505         }
4506 }
4507
4508 /**
4509  * i40iw_hw_stat_read_32 - read a 32-bit HW stat counter and accommodate roll-over
4510  * @devstat: pestat struct
4511  * @index: index in HW stat table which contains offset reg-addr
4512  * @value: hw stat value
4513  */
4514 static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
4515                                   enum i40iw_hw_stat_index_32b index,
4516                                   u64 *value)
4517 {
4518         struct i40iw_dev_hw_stat_offsets *stat_table =
4519                 &devstat->hw_stat_offsets;
4520         struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
4521         struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
4522         u64 new_stat_value = 0;
4523         u32 stat_reg_offset = stat_table->stat_offset_32[index];
4524
4525         new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
4526         /* roll-over case */
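        /* the 32-bit register wrapped since the last read: hw_stats keeps
         * a running 64-bit total, so only the counts accumulated after the
         * wrap back to zero are credited here
         */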
4527         if (new_stat_value < last_rd_stats->stat_value_32[index])
4528                 hw_stats->stat_value_32[index] += new_stat_value;
4529         else
4530                 hw_stats->stat_value_32[index] +=
4531                         new_stat_value - last_rd_stats->stat_value_32[index];
4532         last_rd_stats->stat_value_32[index] = new_stat_value;
4533         *value = hw_stats->stat_value_32[index];
4534 }
4535
4536 /**
4537  * i40iw_hw_stat_read_64 - read a 64-bit HW stat counter and accommodate roll-over
4538  * @devstat: pestat struct
4539  * @index: index in HW stat table which contains offset reg-addr
4540  * @value: hw stat value
4541  */
4542 static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
4543                                   enum i40iw_hw_stat_index_64b index,
4544                                   u64 *value)
4545 {
4546         struct i40iw_dev_hw_stat_offsets *stat_table =
4547                 &devstat->hw_stat_offsets;
4548         struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
4549         struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
4550         u64 new_stat_value = 0;
4551         u32 stat_reg_offset = stat_table->stat_offset_64[index];
4552
4553         new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
4554         /* roll-over case */
4555         if (new_stat_value < last_rd_stats->stat_value_64[index])
4556                 hw_stats->stat_value_64[index] += new_stat_value;
4557         else
4558                 hw_stats->stat_value_64[index] +=
4559                         new_stat_value - last_rd_stats->stat_value_64[index];
4560         last_rd_stats->stat_value_64[index] = new_stat_value;
4561         *value = hw_stats->stat_value_64[index];
4562 }
4563
4564 /**
4565  * i40iw_hw_stat_read_all - read all HW stat counters
4566  * @devstat: pestat struct
4567  * @stat_values: hw stats structure
4568  *
4569  * Read all the HW stat counters, populating the hw_stats structure
4570  * of the passed-in dev's pestat as well as the copy in stat_values.
4571  */
4572 static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
4573                                    struct i40iw_dev_hw_stats *stat_values)
4574 {
4575         u32 stat_index;
4576
4577         for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
4578              stat_index++)
4579                 i40iw_hw_stat_read_32(devstat, stat_index,
4580                                       &stat_values->stat_value_32[stat_index]);
4581         for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
4582              stat_index++)
4583                 i40iw_hw_stat_read_64(devstat, stat_index,
4584                                       &stat_values->stat_value_64[stat_index]);
4585 }
4586
4587 /**
4588  * i40iw_hw_stat_refresh_all - Update all HW stat structs
4589  * @devstat: pestat struct
4591  *
4592  * Read all the HW stat counters to refresh the values in the hw_stats
4593  * structure of the passed-in dev's pestat.
4594  */
4595 static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
4596 {
4597         u64 stat_value;
4598         u32 stat_index;
4599
4600         for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
4601              stat_index++)
4602                 i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
4603         for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
4604              stat_index++)
4605                 i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
4606 }
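
/*
 * A hedged sketch (hypothetical helper, not driver code) of how the
 * periodic stats timer started from i40iw_device_init() below might
 * refresh the counters: hold the pestat stats_lock so concurrent
 * readers see a consistent hw_stats snapshot while every 32- and
 * 64-bit counter is re-read.
 */
static void i40iw_example_stats_timer_tick(struct i40iw_sc_dev *dev)
{
        struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
        unsigned long flags;

        spin_lock_irqsave(&devstat->stats_lock, flags);
        i40iw_hw_stat_refresh_all(devstat);
        spin_unlock_irqrestore(&devstat->stats_lock, flags);
}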
4607
4608 static struct i40iw_cqp_ops iw_cqp_ops = {
4609         i40iw_sc_cqp_init,
4610         i40iw_sc_cqp_create,
4611         i40iw_sc_cqp_post_sq,
4612         i40iw_sc_cqp_get_next_send_wqe,
4613         i40iw_sc_cqp_destroy,
4614         i40iw_sc_poll_for_cqp_op_done
4615 };
4616
4617 static struct i40iw_ccq_ops iw_ccq_ops = {
4618         i40iw_sc_ccq_init,
4619         i40iw_sc_ccq_create,
4620         i40iw_sc_ccq_destroy,
4621         i40iw_sc_ccq_create_done,
4622         i40iw_sc_ccq_get_cqe_info,
4623         i40iw_sc_ccq_arm
4624 };
4625
4626 static struct i40iw_ceq_ops iw_ceq_ops = {
4627         i40iw_sc_ceq_init,
4628         i40iw_sc_ceq_create,
4629         i40iw_sc_cceq_create_done,
4630         i40iw_sc_cceq_destroy_done,
4631         i40iw_sc_cceq_create,
4632         i40iw_sc_ceq_destroy,
4633         i40iw_sc_process_ceq
4634 };
4635
4636 static struct i40iw_aeq_ops iw_aeq_ops = {
4637         i40iw_sc_aeq_init,
4638         i40iw_sc_aeq_create,
4639         i40iw_sc_aeq_destroy,
4640         i40iw_sc_get_next_aeqe,
4641         i40iw_sc_repost_aeq_entries,
4642         i40iw_sc_aeq_create_done,
4643         i40iw_sc_aeq_destroy_done
4644 };
4645
4646 /* iwarp pd ops */
4647 static struct i40iw_pd_ops iw_pd_ops = {
4648         i40iw_sc_pd_init,
4649 };
4650
4651 static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
4652         .qp_init = i40iw_sc_qp_init,
4653         .qp_create = i40iw_sc_qp_create,
4654         .qp_modify = i40iw_sc_qp_modify,
4655         .qp_destroy = i40iw_sc_qp_destroy,
4656         .qp_flush_wqes = i40iw_sc_qp_flush_wqes,
4657         .qp_upload_context = i40iw_sc_qp_upload_context,
4658         .qp_setctx = i40iw_sc_qp_setctx,
4659         .qp_send_lsmm = i40iw_sc_send_lsmm,
4660         .qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
4661         .qp_send_rtt = i40iw_sc_send_rtt,
4662         .qp_post_wqe0 = i40iw_sc_post_wqe0,
4663         .iw_mr_fast_register = i40iw_sc_mr_fast_register
4664 };
4665
4666 static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
4667         i40iw_sc_cq_init,
4668         i40iw_sc_cq_create,
4669         i40iw_sc_cq_destroy,
4670         i40iw_sc_cq_modify,
4671 };
4672
4673 static struct i40iw_mr_ops iw_mr_ops = {
4674         i40iw_sc_alloc_stag,
4675         i40iw_sc_mr_reg_non_shared,
4676         i40iw_sc_mr_reg_shared,
4677         i40iw_sc_dealloc_stag,
4678         i40iw_sc_query_stag,
4679         i40iw_sc_mw_alloc
4680 };
4681
4682 static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
4683         i40iw_sc_manage_push_page,
4684         i40iw_sc_manage_hmc_pm_func_table,
4685         i40iw_sc_set_hmc_resource_profile,
4686         i40iw_sc_commit_fpm_values,
4687         i40iw_sc_query_fpm_values,
4688         i40iw_sc_static_hmc_pages_allocated,
4689         i40iw_sc_add_arp_cache_entry,
4690         i40iw_sc_del_arp_cache_entry,
4691         i40iw_sc_query_arp_cache_entry,
4692         i40iw_sc_manage_apbvt_entry,
4693         i40iw_sc_manage_qhash_table_entry,
4694         i40iw_sc_alloc_local_mac_ipaddr_entry,
4695         i40iw_sc_add_local_mac_ipaddr_entry,
4696         i40iw_sc_del_local_mac_ipaddr_entry,
4697         i40iw_sc_cqp_nop,
4698         i40iw_sc_commit_fpm_values_done,
4699         i40iw_sc_query_fpm_values_done,
4700         i40iw_sc_manage_hmc_pm_func_table_done,
4701         i40iw_sc_suspend_qp,
4702         i40iw_sc_resume_qp
4703 };
4704
4705 static struct i40iw_hmc_ops iw_hmc_ops = {
4706         i40iw_sc_init_iw_hmc,
4707         i40iw_sc_parse_fpm_query_buf,
4708         i40iw_sc_configure_iw_fpm,
4709         i40iw_sc_parse_fpm_commit_buf,
4710         i40iw_sc_create_hmc_obj,
4711         i40iw_sc_del_hmc_obj,
4712         NULL,
4713         NULL
4714 };
4715
4716 static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
4717         i40iw_hw_stat_init,
4718         i40iw_hw_stat_read_32,
4719         i40iw_hw_stat_read_64,
4720         i40iw_hw_stat_read_all,
4721         i40iw_hw_stat_refresh_all
4722 };
4723
4724 /**
4725  * i40iw_device_init_pestat - Initialize the pestat structure
4726  * @devstat: pestat struct
4727  */
4728 enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
4729 {
4730         devstat->ops = iw_device_pestat_ops;
4731         return 0;
4732 }
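
/*
 * Dispatch example (a hedged sketch): once i40iw_device_init_pestat()
 * has copied iw_device_pestat_ops into the pestat, callers reach the
 * stat routines through the ops table rather than the static functions
 * above, mirroring the ops.iw_hw_stat_init call in i40iw_device_init()
 * below. The helper and the iw_hw_stat_read_all member name follow that
 * pattern and are assumptions here.
 */
static void i40iw_example_read_stats(struct i40iw_dev_pestat *devstat,
                                     struct i40iw_dev_hw_stats *values)
{
        devstat->ops.iw_hw_stat_read_all(devstat, values);
}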
4733
4734 /**
4735  * i40iw_device_init - Initialize IWARP device
4736  * @dev: IWARP device pointer
4737  * @info: IWARP init info
4738  */
4739 enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
4740                                          struct i40iw_device_init_info *info)
4741 {
4742         u32 val;
4743         u32 vchnl_ver = 0;
4744         u16 hmc_fcn = 0;
4745         enum i40iw_status_code ret_code = 0;
4746         u8 db_size;
4747
4748         spin_lock_init(&dev->cqp_lock);
4749         INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */
4750
4751         i40iw_device_init_uk(&dev->dev_uk);
4752
4753         dev->debug_mask = info->debug_mask;
4754
4755         ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
4756         if (ret_code) {
4757                 i40iw_debug(dev, I40IW_DEBUG_DEV,
4758                             "%s: i40iw_device_init_pestat failed\n", __func__);
4759                 return ret_code;
4760         }
4761         dev->hmc_fn_id = info->hmc_fn_id;
4762         dev->qs_handle = info->qs_handle;
4763         dev->exception_lan_queue = info->exception_lan_queue;
4764         dev->is_pf = info->is_pf;
4765
4766         dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
4767         dev->fpm_query_buf = info->fpm_query_buf;
4768
4769         dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
4770         dev->fpm_commit_buf = info->fpm_commit_buf;
4771
4772         dev->hw = info->hw;
4773         dev->hw->hw_addr = info->bar0;
4774
4775         val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
4776         dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
4777
4778         if (dev->is_pf) {
4779                 dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
4780                         dev->hmc_fn_id, dev->hw, true);
4781                 spin_lock_init(&dev->dev_pestat.stats_lock);
4782                 /*start the periodic stats_timer */
4783                 i40iw_hw_stats_start_timer(dev);
4784                 val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
4785                 db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
4786                 if ((db_size != I40IW_PE_DB_SIZE_4M) &&
4787                     (db_size != I40IW_PE_DB_SIZE_8M)) {
4788                         i40iw_debug(dev, I40IW_DEBUG_DEV,
4789                                     "%s: PE doorbell is not enabled in CSR val 0x%x\n",
4790                                     __func__, val);
4791                         ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
4792                         return ret_code;
4793                 }
4794                 dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
4795                 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
4796         } else {
4797                 dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
4798         }
4799
4800         dev->cqp_ops = &iw_cqp_ops;
4801         dev->ccq_ops = &iw_ccq_ops;
4802         dev->ceq_ops = &iw_ceq_ops;
4803         dev->aeq_ops = &iw_aeq_ops;
4804         dev->cqp_misc_ops = &iw_cqp_misc_ops;
4805         dev->iw_pd_ops = &iw_pd_ops;
4806         dev->iw_priv_qp_ops = &iw_priv_qp_ops;
4807         dev->iw_priv_cq_ops = &iw_priv_cq_ops;
4808         dev->mr_ops = &iw_mr_ops;
4809         dev->hmc_ops = &iw_hmc_ops;
4810         dev->vchnl_if.vchnl_send = info->vchnl_send;
4811         if (dev->vchnl_if.vchnl_send)
4812                 dev->vchnl_up = true;
4813         else
4814                 dev->vchnl_up = false;
4815         if (!dev->is_pf) {
4816                 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
4817                 ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
4818                 if (!ret_code) {
4819                         i40iw_debug(dev, I40IW_DEBUG_DEV,
4820                                     "%s: Get Channel version rc = 0x%0x, version is %u\n",
4821                                     __func__, ret_code, vchnl_ver);
4822                         ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
4823                         if (!ret_code) {
4824                                 i40iw_debug(dev, I40IW_DEBUG_DEV,
4825                                             "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
4826                                             __func__, ret_code, hmc_fcn);
4827                                 dev->hmc_fn_id = (u8)hmc_fcn;
4828                         }
4829                 }
4830         }
4831         dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
4832
4833         return ret_code;
4834 }
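
/*
 * Caller-side sketch (hypothetical, not part of the driver): populate
 * only the i40iw_device_init_info fields that i40iw_device_init() above
 * consumes. Field names and types are assumed from the assignments in
 * that function; the PF values chosen here are illustrative.
 */
static enum i40iw_status_code i40iw_example_device_init(struct i40iw_sc_dev *dev,
                                                        struct i40iw_hw *hw,
                                                        u8 __iomem *bar0)
{
        struct i40iw_device_init_info info = {};

        info.hw = hw;
        info.bar0 = bar0;               /* mapped into hw->hw_addr above */
        info.is_pf = true;              /* PF path: stats timer + doorbell check */
        info.hmc_fn_id = 0;             /* illustrative function id */
        info.debug_mask = 0;
        info.vchnl_send = NULL;         /* no virtual channel -> vchnl_up false */

        return i40iw_device_init(dev, &info);
}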