/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

/* General constants */
#define QM_PQ_MEM_4KB(pq_size)  (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
                                                        QM_PQ_ELEMENT_SIZE, \
                                                        0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size)        (pq_size ? DIV_ROUND_UP(pq_size, \
                                                                0x100) - 1 : 0)
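/* Worked example (a sketch, assuming QM_PQ_ELEMENT_SIZE is the 4-byte
 * element size from qed_hsi.h): for pq_size = 1000 CIDs,
 * QM_PQ_MEM_4KB(1000) = DIV_ROUND_UP(1001 * 4, 4096) = 1, i.e. one 4KB
 * page, while QM_PQ_SIZE_256B(1000) = DIV_ROUND_UP(1000, 256) - 1 = 3,
 * the size the hardware expects in 256B units minus one.
 */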
#define QM_INVALID_PQ_ID                        0xffff
/* Feature enable */
#define QM_BYPASS_EN                            1
#define QM_BYTE_CRD_EN                          1
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF                     4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND              62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
#define QM_WFQ_VP_PQ_PF_SHIFT           5
#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL                      43750000
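/* A sketch of the WFQ weight arithmetic (using only the values above):
 * each weight unit adds 0x9000 = 36864 credit units, so the largest
 * usable weight is QM_WFQ_MAX_INC_VAL / 0x9000 = 43750000 / 36864,
 * roughly 1186; qed_pf_wfq_rt_init() below rejects anything larger.
 */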

/* RL constants */
#define QM_RL_UPPER_BOUND                       62500000
#define QM_RL_PERIOD                            5               /* in us */
#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL                       43750000
#define QM_RL_INC_VAL(rate)             max_t(u32, \
                                              (u32)(((rate ? rate : 1000000) * \
                                                     QM_RL_PERIOD * 101) / \
                                                    (8 * 100)), 1)
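/* A sketch of the RL increment arithmetic, assuming the rate argument is
 * in Mb/s (as the qed rate-limit callers use it): the increment is the
 * byte credit granted every QM_RL_PERIOD (5 us), with a 1% margin from
 * the 101/100 factor and the /8 converting bits to bytes. E.g. rate =
 * 10000 (10 Gb/s) gives 10000 * 5 * 101 / 800 = 6312 bytes per period,
 * i.e. just over 10 Gb/s.
 */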
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF           1
#define QM_OPPOR_FW_STOP_DEF            0
#define QM_OPPOR_PQ_EMPTY_DEF           1
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES                          150
#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (                \
                PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
                (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -      \
                 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (             \
                PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
                (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
                 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
        ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
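/* Worked example of the line-credit formula (a sketch; the sign-bit
 * encoding of QM_LINE_CRD_REG_SIGN_BIT comes from qed_hsi.h): for the
 * 150 pure-LB command queue lines, QM_VOQ_LINE_CRD(150) =
 * (150 - 4) * 2 = 292 credit units, OR'd with the sign bit the credit
 * registers expect.
 */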
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS            38
#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR                      10
#define BTB_PURE_LB_RATIO                       7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH           32
#define QM_STOP_CMD_ADDR                2
#define QM_STOP_CMD_STRUCT_SIZE         2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
#define QM_STOP_CMD_PAUSE_MASK_MASK     -1
#define QM_STOP_CMD_GROUP_ID_OFFSET     1
#define QM_STOP_CMD_GROUP_ID_SHIFT      16
#define QM_STOP_CMD_GROUP_ID_MASK       15
#define QM_STOP_CMD_PQ_TYPE_OFFSET      1
#define QM_STOP_CMD_PQ_TYPE_SHIFT       24
#define QM_STOP_CMD_PQ_TYPE_MASK        1
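/* Layout derived from the OFFSET/SHIFT/MASK triples above: the stop
 * command is two u32 words; word 0 holds the full 32-bit PQ pause mask,
 * word 1 holds GROUP_ID in bits 16-19 and PQ_TYPE in bit 24.
 */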
#define QM_STOP_CMD_MAX_POLL_COUNT      100
#define QM_STOP_CMD_POLL_PERIOD_US      500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)         cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
        SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
                  cmd ## _ ## field, value)
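/* Expansion example (pure preprocessor token pasting, nothing assumed):
 * QM_CMD_STRUCT_SIZE(QM_STOP_CMD) pastes to QM_STOP_CMD_STRUCT_SIZE (2),
 * and QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, x) becomes
 * SET_FIELD(cmd_arr[QM_STOP_CMD_GROUP_ID_OFFSET], QM_STOP_CMD_GROUP_ID, x).
 */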
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
        ((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port)                    (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port)                        \
        ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : \
                        LB_VOQ(port))
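/* Mapping example (a sketch; LB_TC and MAX_PHYS_VOQS come from qed_hsi.h):
 * with max_phys_tcs_per_port = 4, port 1 / TC 2 maps to physical VOQ
 * 1 * 4 + 2 = 6, while the pure-LB TC (tc >= LB_TC) of port 1 maps to
 * loopback VOQ MAX_PHYS_VOQS + 1.
 */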
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
        STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
        if (pf_rl_en) {
                /* Enable RLs for all VOQs */
                STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
                             (1 << MAX_NUM_VOQS) - 1);
                /* Write RL period */
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLPFPERIODTIMER_RT_OFFSET,
                             QM_RL_PERIOD_CLK_25M);

                /* Set credit threshold for QM bypass flow */
                if (QM_BYPASS_EN)
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
                                     QM_RL_UPPER_BOUND);
        }
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
        STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

        /* Set credit threshold for QM bypass flow */
        if (pf_wfq_en && QM_BYPASS_EN)
                STORE_RT_REG(p_hwfn,
                             QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
                             QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
                     vport_rl_en ? 1 : 0);
        if (vport_rl_en) {
                /* Write RL period (use timer 0 only) */
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
                             QM_RL_PERIOD_CLK_25M);
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
                             QM_RL_PERIOD_CLK_25M);

                /* Set credit threshold for QM bypass flow */
                if (QM_BYPASS_EN)
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
                                     QM_RL_UPPER_BOUND);
        }
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
        STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
                     vport_wfq_en ? 1 : 0);

        /* Set credit threshold for QM bypass flow */
        if (vport_wfq_en && QM_BYPASS_EN)
                STORE_RT_REG(p_hwfn,
                             QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
                             QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
                                       u8 voq, u16 cmdq_lines)
{
        u32 qm_line_crd;

        qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
        OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
                         (u32)cmdq_lines);
        STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
        STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
                     qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
        struct qed_hwfn *p_hwfn,
        u8 max_ports_per_engine,
        u8 max_phys_tcs_per_port,
        struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
        u8 tc, voq, port_id, num_tcs_in_port;

        /* Clear PBF lines for all VOQs */
        for (voq = 0; voq < MAX_NUM_VOQS; voq++)
                STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
        for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
                if (port_params[port_id].active) {
                        u16 phys_lines, phys_lines_per_tc;

                        /* Find #lines to divide between active phys TCs */
                        phys_lines = port_params[port_id].num_pbf_cmd_lines -
                                     PBF_CMDQ_PURE_LB_LINES;
                        /* Find #lines per active physical TC */
                        num_tcs_in_port = 0;
                        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
                                if (((port_params[port_id].active_phys_tcs >>
                                      tc) & 0x1) == 1)
                                        num_tcs_in_port++;
                        }

                        phys_lines_per_tc = phys_lines / num_tcs_in_port;
                        /* Init registers per active TC */
                        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
                                if (((port_params[port_id].active_phys_tcs >>
                                      tc) & 0x1) != 1)
                                        continue;

                                voq = PHYS_VOQ(port_id, tc,
                                               max_phys_tcs_per_port);
                                qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
                                                           phys_lines_per_tc);
                        }

                        /* Init registers for pure LB TC */
                        qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
                                                   PBF_CMDQ_PURE_LB_LINES);
                }
        }
}

static void qed_btb_blocks_rt_init(
        struct qed_hwfn *p_hwfn,
        u8 max_ports_per_engine,
        u8 max_phys_tcs_per_port,
        struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
        u32 usable_blocks, pure_lb_blocks, phys_blocks;
        u8 tc, voq, port_id, num_tcs_in_port;

        for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
                u32 temp;

                if (!port_params[port_id].active)
                        continue;

                /* Subtract headroom blocks */
                usable_blocks = port_params[port_id].num_btb_blocks -
                                BTB_HEADROOM_BLOCKS;

                /* Find blocks per physical TC */
                num_tcs_in_port = 0;
                for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
                        if (((port_params[port_id].active_phys_tcs >>
                              tc) & 0x1) == 1)
                                num_tcs_in_port++;
                }

                pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                                 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
                                  BTB_PURE_LB_RATIO);
                pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
                                       pure_lb_blocks / BTB_PURE_LB_FACTOR);
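
                /* Arithmetic sketch of the split above (hypothetical
                 * numbers): with usable_blocks = 1000 and 4 active TCs,
                 * pure_lb_blocks is first 1000 * 10 / (4 * 10 + 7) = 212,
                 * then 212 / 10 = 21, raised to the BTB_JUMBO_PKT_BLOCKS
                 * floor of 38; the statement below then gives each
                 * physical TC (1000 - 38) / 4 = 240 guaranteed blocks.
                 */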
                phys_blocks = (usable_blocks - pure_lb_blocks) /
                              num_tcs_in_port;

                /* Init physical TCs */
                for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
                        if (((port_params[port_id].active_phys_tcs >>
                              tc) & 0x1) != 1)
                                continue;

                        voq = PHYS_VOQ(port_id, tc,
                                       max_phys_tcs_per_port);
                        STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
                                     phys_blocks);
                }

                /* Init pure LB TC */
                temp = LB_VOQ(port_id);
                STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
                             pure_lb_blocks);
        }
}

/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
        struct qed_hwfn *p_hwfn,
        struct qed_ptt *p_ptt,
        struct qed_qm_pf_rt_init_params *p_params,
        u32 base_mem_addr_4kb)
{
        struct init_qm_vport_params *vport_params = p_params->vport_params;
        u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
        u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
        u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
                            QM_PF_QUEUE_GROUP_SIZE;
        u16 i, pq_id, pq_group;

        /* A bit per Tx PQ indicating if the PQ is associated with a VF */
        u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
        u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
        u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
        u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
        u32 mem_addr_4kb = base_mem_addr_4kb;

        /* Set mapping from PQ group to PF */
        for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
                STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
                             (u32)(p_params->pf_id));
        /* Set PQ sizes */
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
                     QM_PQ_SIZE_256B(p_params->num_pf_cids));
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
                     QM_PQ_SIZE_256B(p_params->num_vf_cids));

        /* Go over all Tx PQs */
        for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
                u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
                             p_params->max_phys_tcs_per_port);
                bool is_vf_pq = (i >= p_params->num_pf_pqs);
                struct qm_rf_pq_map tx_pq_map;

                bool rl_valid = p_params->pq_params[i].rl_valid &&
                                (p_params->pq_params[i].vport_id <
                                 MAX_QM_GLOBAL_RLS);

                /* Update first Tx PQ of VPORT/TC */
                u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
                                    p_params->start_vport;
                u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
                u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

                if (first_tx_pq_id == QM_INVALID_PQ_ID) {
                        /* Create new VP PQ */
                        pq_ids[p_params->pq_params[i].tc_id] = pq_id;
                        first_tx_pq_id = pq_id;

                        /* Map VP PQ to VOQ and PF */
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_WFQVPMAP_RT_OFFSET +
                                     first_tx_pq_id,
                                     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
                                     (p_params->pf_id <<
                                      QM_WFQ_VP_PQ_PF_SHIFT));
                }

                if (p_params->pq_params[i].rl_valid && !rl_valid)
                        DP_NOTICE(p_hwfn,
                                  "Invalid VPORT ID for rate limiter configuration\n");
                /* Fill PQ map entry */
                memset(&tx_pq_map, 0, sizeof(tx_pq_map));
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
                SET_FIELD(tx_pq_map.reg,
                          QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
                          rl_valid ?
                          p_params->pq_params[i].vport_id : 0);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
                SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
                          p_params->pq_params[i].wrr_group);
                /* Write PQ map entry to CAM */
                STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
                             *((u32 *)&tx_pq_map));
                /* Set base address */
                STORE_RT_REG(p_hwfn,
                             QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
                             mem_addr_4kb);

                /* If VF PQ, add indication to PQ VF mask */
                if (is_vf_pq) {
                        tx_pq_vf_mask[pq_id /
                                      QM_PF_QUEUE_GROUP_SIZE] |=
                            BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
                        mem_addr_4kb += vport_pq_mem_4kb;
                } else {
                        mem_addr_4kb += pq_mem_4kb;
                }
        }

        /* Store Tx PQ VF mask to size select register */
        for (i = 0; i < num_tx_pq_vf_masks; i++)
                if (tx_pq_vf_mask[i])
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
                                     tx_pq_vf_mask[i]);
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
                                     u8 port_id,
                                     u8 pf_id,
                                     u32 num_pf_cids,
                                     u32 num_tids, u32 base_mem_addr_4kb)
{
        u32 pq_size, pq_mem_4kb, mem_addr_4kb;
        u16 i, pq_id, pq_group;

        /* A single "other" PQ group is used in each PF, where PQ group i
         * is used in PF i.
         */
        pq_group = pf_id;
        pq_size = num_pf_cids + num_tids;
        pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
        mem_addr_4kb = base_mem_addr_4kb;

        /* Map PQ group to PF */
        STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
                     (u32)(pf_id));
        /* Set PQ sizes */
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
                     QM_PQ_SIZE_256B(pq_size));

        /* Set base address */
        for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
             i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
                STORE_RT_REG(p_hwfn,
                             QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
                             mem_addr_4kb);
                mem_addr_4kb += pq_mem_4kb;
        }
}

/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
                              struct qed_qm_pf_rt_init_params *p_params)
{
        u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
        u32 crd_reg_offset;
        u32 inc_val;
        u16 i;

        /* The first MAX_NUM_PFS_BB PFs use the WFQPFCRD credit registers;
         * higher PF IDs use the _MSB copy.
         */
        if (p_params->pf_id < MAX_NUM_PFS_BB)
                crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
        else
                crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
        crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;

        inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
                return -1;
        }

        for (i = 0; i < num_tx_pqs; i++) {
                u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
                             p_params->max_phys_tcs_per_port);

                OVERWRITE_RT_REG(p_hwfn,
                                 crd_reg_offset + voq * MAX_NUM_PFS_BB,
                                 QM_WFQ_CRD_REG_SIGN_BIT);
        }

        STORE_RT_REG(p_hwfn,
                     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
                     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
                     inc_val);
        return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
        u32 inc_val = QM_RL_INC_VAL(pf_rl);

        if (inc_val > QM_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
                return -1;
        }
        STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
                     QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
                     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
        return 0;
}

/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
                              u8 num_vports,
                              struct init_qm_vport_params *vport_params)
{
        u32 inc_val;
        u8 tc, i;

        /* Go over all PF VPORTs */
        for (i = 0; i < num_vports; i++) {
                if (!vport_params[i].vport_wfq)
                        continue;

                inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
                if (inc_val > QM_WFQ_MAX_INC_VAL) {
                        DP_NOTICE(p_hwfn,
                                  "Invalid VPORT WFQ weight configuration\n");
                        return -1;
                }

                /* Each VPORT can have several VPORT PQ IDs for
                 * different TCs
                 */
                for (tc = 0; tc < NUM_OF_TCS; tc++) {
                        u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

                        if (vport_pq_id != QM_INVALID_PQ_ID) {
                                STORE_RT_REG(p_hwfn,
                                             QM_REG_WFQVPCRD_RT_OFFSET +
                                             vport_pq_id,
                                             QM_WFQ_CRD_REG_SIGN_BIT);
                                STORE_RT_REG(p_hwfn,
                                             QM_REG_WFQVPWEIGHT_RT_OFFSET +
                                             vport_pq_id, inc_val);
                        }
                }
        }

        return 0;
}

static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
                                u8 start_vport,
                                u8 num_vports,
                                struct init_qm_vport_params *vport_params)
{
        u8 i, vport_id;

        if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
                DP_NOTICE(p_hwfn,
                          "Invalid VPORT ID for rate limiter configuration\n");
                return -1;
        }

        /* Go over all PF VPORTs */
        for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
                u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

                if (inc_val > QM_RL_MAX_INC_VAL) {
                        DP_NOTICE(p_hwfn,
                                  "Invalid VPORT rate-limit configuration\n");
                        return -1;
                }

                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
                             QM_RL_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
                             QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
                             inc_val);
        }

        return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt)
{
        u32 reg_val, i;

        /* Poll the SDM command ready bit for up to
         * QM_STOP_CMD_MAX_POLL_COUNT * QM_STOP_CMD_POLL_PERIOD_US = 50 ms.
         */
        for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
             i++) {
                udelay(QM_STOP_CMD_POLL_PERIOD_US);
                reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
        }

        /* Check if timeout while waiting for SDM command ready */
        if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Timeout when waiting for QM SDM command ready signal\n");
                return false;
        }

        return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
        if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
                return false;

        qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
        qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
        qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
        qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
        qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

        return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
                       u32 num_pf_cids,
                       u32 num_vf_cids,
                       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
        return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
               QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
               QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
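
/* Sizing sketch (hypothetical inputs, assuming QM_PQ_ELEMENT_SIZE is
 * 4 bytes): num_pf_cids = 512, num_vf_cids = 128, num_tids = 256,
 * num_pf_pqs = 8, num_vf_pqs = 16 gives 1 * 8 + 1 * 16 + 1 * 4 = 28
 * 4KB pages in total, since each QM_PQ_MEM_4KB() term rounds up to one
 * page for these sizes.
 */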

int qed_qm_common_rt_init(
        struct qed_hwfn *p_hwfn,
        struct qed_qm_common_rt_init_params *p_params)
{
        /* Init AFullOprtnstcCrdMask */
        u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
                    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
                   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
                   (p_params->pf_wfq_en <<
                    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
                   (p_params->vport_wfq_en <<
                    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
                   (p_params->pf_rl_en <<
                    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
                   (p_params->vport_rl_en <<
                    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
                   (QM_OPPOR_FW_STOP_DEF <<
                    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
                   (QM_OPPOR_PQ_EMPTY_DEF <<
                    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

        STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
        qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
        qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
        qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
        qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
        qed_cmdq_lines_rt_init(p_hwfn,
                               p_params->max_ports_per_engine,
                               p_params->max_phys_tcs_per_port,
                               p_params->port_params);
        qed_btb_blocks_rt_init(p_hwfn,
                               p_params->max_ports_per_engine,
                               p_params->max_phys_tcs_per_port,
                               p_params->port_params);
        return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      struct qed_qm_pf_rt_init_params *p_params)
{
        struct init_qm_vport_params *vport_params = p_params->vport_params;
        u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
                                               p_params->num_tids) *
                                 QM_OTHER_PQS_PER_PF;
        u8 tc, i;

        /* Clear first Tx PQ ID array for each VPORT */
        for (i = 0; i < p_params->num_vports; i++)
                for (tc = 0; tc < NUM_OF_TCS; tc++)
                        vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

        /* Map Other PQs (if any) */
        qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
                                 p_params->num_pf_cids, p_params->num_tids, 0);

        /* Map Tx PQs */
        qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

        if (p_params->pf_wfq)
                if (qed_pf_wfq_rt_init(p_hwfn, p_params))
                        return -1;

        if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
                return -1;

        if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
                return -1;

        if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
                                 p_params->num_vports, vport_params))
                return -1;

        return 0;
}
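
/* The helpers below are the runtime counterparts of the *_rt_init()
 * functions above: they perform the same validity checks, but program
 * the QM registers directly with qed_wr() instead of staging values in
 * the runtime-init array via STORE_RT_REG().
 */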

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
        u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
                return -1;
        }

        qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
        return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
        u32 inc_val = QM_RL_INC_VAL(pf_rl);

        if (inc_val > QM_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
                return -1;
        }

        qed_wr(p_hwfn, p_ptt,
               QM_REG_RLPFCRD + pf_id * 4,
               QM_RL_CRD_REG_SIGN_BIT);
        qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

        return 0;
}

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
        u16 vport_pq_id;
        u32 inc_val;
        u8 tc;

        inc_val = QM_WFQ_INC_VAL(vport_wfq);
        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
                return -1;
        }

        for (tc = 0; tc < NUM_OF_TCS; tc++) {
                vport_pq_id = first_tx_pq_id[tc];
                if (vport_pq_id != QM_INVALID_PQ_ID)
                        qed_wr(p_hwfn, p_ptt,
                               QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
                               inc_val);
        }

        return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
        u32 inc_val = QM_RL_INC_VAL(vport_rl);

        if (vport_id >= MAX_QM_GLOBAL_RLS) {
                DP_NOTICE(p_hwfn,
                          "Invalid VPORT ID for rate limiter configuration\n");
                return -1;
        }

        if (inc_val > QM_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
                return -1;
        }

        qed_wr(p_hwfn, p_ptt,
               QM_REG_RLGLBLCRD + vport_id * 4,
               QM_RL_CRD_REG_SIGN_BIT);
        qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

        return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          bool is_release_cmd,
                          bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
        u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
        u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

        /* Set command's PQ type */
        QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

        for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
                /* Set PQ bit in mask (stop command only) */
                if (!is_release_cmd)
                        pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

                /* If last PQ or end of PQ mask, write command */
                if ((pq_id == last_pq) ||
                    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
                        QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
                                         PAUSE_MASK, pq_mask);
                        QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
                                         GROUP_ID,
                                         pq_id / QM_STOP_PQ_MASK_WIDTH);
                        if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
                                             cmd_arr[0], cmd_arr[1]))
                                return false;
                        pq_mask = 0;
                }
        }

        return true;
}
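
/* Usage sketch (hypothetical ranges): stopping Tx PQs 0-63 with
 * is_release_cmd = false builds a full 32-bit pause mask for group 0,
 * sends one command, then repeats for group 1, since each command covers
 * QM_STOP_PQ_MASK_WIDTH = 32 PQs.
 */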

static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
        if (enable)
                set_bit(bit, var);
        else
                clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT -188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt, u16 dest_port)
{
        qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
        qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, bool vxlan_enable)
{
        unsigned long reg_val = 0;
        u8 shift;

        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

        if (reg_val)
                qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
                       PRS_ETH_TUNN_FIC_FORMAT);

        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
               vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        bool eth_gre_enable, bool ip_gre_enable)
{
        unsigned long reg_val = 0;
        u8 shift;

        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
                qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
                       PRS_ETH_TUNN_FIC_FORMAT);

        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

        shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
               eth_gre_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
               ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u16 dest_port)
{
        qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable, bool ip_geneve_enable)
{
        unsigned long reg_val = 0;
        u8 shift;

        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
                qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
                       PRS_ETH_TUNN_FIC_FORMAT);

        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

        /* EDPM with GENEVE tunnel not supported in BB_B0 */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                return;

        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
               ip_geneve_enable ? 1 : 0);
}

#define T_ETH_PACKET_ACTION_GFT_EVENTID  23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)

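/* A note on the sizes above, derived from the constants and the writes
 * below: each PF's GFT profile occupies one 32-bit CAM line and one
 * 64-bit RAM line, and the RAM line is programmed as two 32-bit halves,
 * lo at the line address and hi at the line address + 4.
 */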
void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u16 pf_id)
{
        u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
                      pf_id * RAM_LINE_SIZE;

        /* Stop using GFT logic */
        qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
        qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
        qed_wr(p_hwfn, p_ptt, hw_addr, 0);
        qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
}

void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                             u16 pf_id, bool tcp, bool udp,
                             bool ipv4, bool ipv6)
{
        union gft_cam_line_union camline;
        struct gft_ram_line ramline;
        u32 rfs_cm_hdr_event_id;

        rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);

        if (!ipv6 && !ipv4)
                DP_NOTICE(p_hwfn,
                          "set_rfs_mode_enable: must accept at least one of ipv4 or ipv6\n");
        if (!tcp && !udp)
                DP_NOTICE(p_hwfn,
                          "set_rfs_mode_enable: must accept at least one of udp or tcp\n");

        rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
                                        PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
        rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
                                        PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
        qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

        /* Configure registers for RFS mode */
        qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
        qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
        camline.cam_line_mapped.camline = 0;

        /* CAM line is now valid */
        SET_FIELD(camline.cam_line_mapped.camline,
                  GFT_CAM_LINE_MAPPED_VALID, 1);

        /* Filters are per PF */
        SET_FIELD(camline.cam_line_mapped.camline,
                  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
                  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
        SET_FIELD(camline.cam_line_mapped.camline,
                  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
        if (!(tcp && udp)) {
                SET_FIELD(camline.cam_line_mapped.camline,
                          GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
                          GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
                if (tcp)
                        SET_FIELD(camline.cam_line_mapped.camline,
                                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                                  GFT_PROFILE_TCP_PROTOCOL);
                else
                        SET_FIELD(camline.cam_line_mapped.camline,
                                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                                  GFT_PROFILE_UDP_PROTOCOL);
        }

        if (!(ipv4 && ipv6)) {
                SET_FIELD(camline.cam_line_mapped.camline,
                          GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
                if (ipv4)
                        SET_FIELD(camline.cam_line_mapped.camline,
                                  GFT_CAM_LINE_MAPPED_IP_VERSION,
                                  GFT_PROFILE_IPV4);
                else
                        SET_FIELD(camline.cam_line_mapped.camline,
                                  GFT_CAM_LINE_MAPPED_IP_VERSION,
                                  GFT_PROFILE_IPV6);
        }

        /* Write characteristics to CAM */
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
               camline.cam_line_mapped.camline);
        camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
                                                 PRS_REG_GFT_CAM +
                                                 CAM_LINE_SIZE * pf_id);

        /* Write line to RAM - compare to filter 4 tuple */
        ramline.lo = 0;
        ramline.hi = 0;
        SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
        SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
        SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
        SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
        SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
        SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);

        /* Write the RAM line as two 32-bit halves */
        qed_wr(p_hwfn, p_ptt,
               PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
               ramline.lo);
        qed_wr(p_hwfn, p_ptt,
               PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
               ramline.hi);

        /* Set default profile so that no filter match will happen */
        qed_wr(p_hwfn, p_ptt,
               PRS_REG_GFT_PROFILE_MASK_RAM +
               RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
               ramline.lo);
        qed_wr(p_hwfn, p_ptt,
               PRS_REG_GFT_PROFILE_MASK_RAM +
               RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
               ramline.hi);
}