GNU Linux-libre 4.14.290-gnu1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};

#define HCLGE_SHAPER_BS_U_DEF   1
#define HCLGE_SHAPER_BS_S_DEF   4

#define HCLGE_ETHER_MAX_RATE    100000

/* hclge_shaper_para_calc: calculate the IR parameters for a shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of the IR shaper
 * @ir_u: IR_U parameter of the IR shaper
 * @ir_s: IR_S parameter of the IR shaper
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
        const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 ir_u_calc = 0, ir_s_calc = 0;
        u32 ir_calc;
        u32 tick;

        /* Calc tick */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
            ir > HCLGE_ETHER_MAX_RATE)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
         * the formula is changed to:
         *              126 * 1 * 8
         * ir_calc = ---------------- * 1000
         *              tick * 1
         */
        ir_calc = (1008000 + (tick >> 1) - 1) / tick;

        if (ir_calc == ir) {
                *ir_b = 126;
                *ir_u = 0;
                *ir_s = 0;

                return 0;
        } else if (ir_calc > ir) {
                /* Increasing the denominator to select ir_s value */
                while (ir_calc > ir) {
                        ir_s_calc++;
                        ir_calc = 1008000 / (tick * (1 << ir_s_calc));
                }

                if (ir_calc == ir)
                        *ir_b = 126;
                else
                        *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
        } else {
                /* Increasing the numerator to select ir_u value */
                u32 numerator;

                while (ir_calc < ir) {
                        ir_u_calc++;
                        numerator = 1008000 * (1 << ir_u_calc);
                        ir_calc = (numerator + (tick >> 1)) / tick;
                }

                if (ir_calc == ir) {
                        *ir_b = 126;
                } else {
                        u32 denominator = (8000 * (1 << --ir_u_calc));

                        *ir_b = (ir * tick + (denominator >> 1)) / denominator;
                }
        }

        *ir_u = ir_u_calc;
        *ir_s = ir_s_calc;

        return 0;
}
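
/* Illustrative worked example (added for clarity, not part of the original
 * source): for ir = 300 Mbps at HCLGE_SHAPER_LVL_PRI (tick = 6 * 256 = 1536),
 * the first estimate is ir_calc = (1008000 + 767) / 1536 = 656 Mbps, which
 * exceeds ir, so the ir_s loop runs: ir_s = 1 gives 328 Mbps, ir_s = 2 gives
 * 164 Mbps and the loop stops.  Since 164 != 300, ir_b is recomputed as
 * (300 * 1536 * 4 + 4000) / 8000 = 230, which yields
 * IR = 230 * 8 * 1000 / (1536 * 4) ~= 299.5 Mbps, close to the request.
 */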

static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

        desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
                (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
        u8 tc;

        tc = hdev->tm_info.prio_tc[pri_id];

        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;

        /* The register for priority has four bytes, the first byte includes
         * priority0 and priority1: the higher 4 bits stand for priority1
         * while the lower 4 bits stand for priority0, as below:
         * first byte:  | pri_1 | pri_0 |
         * second byte: | pri_3 | pri_2 |
         * third byte:  | pri_5 | pri_4 |
         * fourth byte: | pri_7 | pri_6 |
         */
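        /* Example (illustrative, not from the original source): with
         * prio_tc = {0, 1, 2, 3, 0, 1, 2, 3}, the four bytes written below
         * are 0x10, 0x32, 0x10 and 0x32.
         */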
        pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

        return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
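
/* A note on buckets (an assumption added for clarity, not from the original
 * source): the C bucket is taken here to be the committed-rate bucket and
 * the P bucket the peak-rate bucket of the shaper; the callers below pass a
 * zero rate for the C bucket and the calculated IR for the P bucket.
 */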

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u8 ir_b, u8 ir_u, u8 ir_s,
                                     u8 bs_b, u8 bs_s)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

        if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pg_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pri_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(qs_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;

        /* Qsets and TCs have a one to one mapping */
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u8 i;

        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
        kinfo->num_tc = min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
        kinfo->rss_size = min_t(u16, hdev->rss_size_max,
                                kinfo->num_tqps / kinfo->num_tc);
        vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
        vport->dwrr = 100;  /* 100 percent as init */
        vport->alloc_rss_size = kinfo->rss_size;

        for (i = 0; i < kinfo->num_tc; i++) {
                if (hdev->hw_tc_map & BIT(i)) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
               FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
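
/* Illustrative example (not in the original source): with 16 TQPs,
 * num_tc = 4 and rss_size_max >= 4, each enabled TC i gets rss_size = 4
 * queues at tqp_offset = 4 * i, and vport 1 starts its qsets at
 * qs_offset = num_tc * vport_id = 4.
 */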

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_tm_vport_tc_info_update(vport);

                vport++;
        }
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;

        hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
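
/* Illustrative default mapping (not in the original source): with
 * num_tc = 4, the loop above yields prio_tc = {0, 1, 2, 3, 0, 0, 0, 0},
 * i.e. user priorities beyond the enabled TCs fall back to TC 0.
 */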

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                int k;

                hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

                hdev->tm_info.pg_info[i].pg_id = i;
                hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

                hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

                if (i != 0)
                        continue;

                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
        }
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tm_info.num_pg != 1))
                return -EINVAL;

        hclge_tm_pg_info_init(hdev);

        hclge_tm_tc_info_init(hdev);

        hclge_tm_vport_info_update(hdev);

        hdev->tm_info.fc_mode = HCLGE_FC_NONE;
        hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

        return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg mapping */
                ret = hclge_tm_pg_to_pri_map_cfg(
                        hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        int ret;
        u32 i;

        /* Pg shaper is only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Shaper para for each pg */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Calc shaper para */
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.pg_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PG,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_C_BUCKET, i,
                                               0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
                                               HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;

                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_P_BUCKET, i,
                                               ir_b, ir_u, ir_s,
                                               HCLGE_SHAPER_BS_U_DEF,
                                               HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        /* Pg dwrr is only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Dwrr for each pg */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg dwrr */
                ret = hclge_tm_pg_weight_cfg(hdev, i,
                                             hdev->tm_info.pg_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_queue **tqp = kinfo->tqp;
        struct hnae3_tc_info *v_tc_info;
        u32 i, j;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                for (j = 0; j < v_tc_info->tqp_count; j++) {
                        struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, one by one mapping */
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i);
                        if (ret)
                                return ret;
                }
        } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
                int k;

                /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < HNAE3_MAX_TC; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, k);
                                if (ret)
                                        return ret;
                        }
        } else {
                return -EINVAL;
        }

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}
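
/* Illustrative summary of the mappings above (not in the original source):
 * in TC-based mode, qset i links to priority i one by one; in vnet-based
 * mode, vport k links its HNAE3_MAX_TC qsets, starting at qs_offset, to
 * priority k, so up to 8 qsets share one priority per vport.
 */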

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PRI,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_shapping_cfg(
                        hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                        0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
                        HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_shapping_cfg(
                        hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                        ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
                        HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        int ret;

        ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
                                        vport->vport_id,
                                        0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
                                        HCLGE_SHAPER_BS_S_DEF);
        if (ret)
                return ret;

        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
                                        vport->vport_id,
                                        ir_b, ir_u, ir_s,
                                        HCLGE_SHAPER_BS_U_DEF,
                                        HCLGE_SHAPER_BS_S_DEF);
        if (ret)
                return ret;

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        struct hnae3_tc_info *v_tc_info;
        u8 ir_u, ir_b, ir_s;
        u32 i;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_QSET,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;
        }

        return 0;
}
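
/* Editorial note (not in the original source): the loop above only checks
 * that shaper parameters can be calculated for each qset; the results are
 * discarded and no qset shaper command is sent in this mode.
 */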

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        /* Each vport needs its shaper configured */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_pg_info *pg_info;
        u8 dwrr;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];

                ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;

                ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        /* Vf dwrr */
        ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
        if (ret)
                return ret;

        /* Qset dwrr */
        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_tm_qs_weight_cfg(
                        hdev, vport->qs_offset + i,
                        hdev->tm_info.pg_info[0].tc_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_to_pri_map(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_shaper_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_dwrr_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
        int ret;
        u8 i;

        /* Only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        if (vport->vport_id >= HNAE3_MAX_TC)
                return -EINVAL;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u8 i;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
                        if (ret)
                                return ret;

                        ret = hclge_tm_qs_schd_mode_cfg(hdev, i);
                        if (ret)
                                return ret;
                }
        } else {
                for (i = 0; i < hdev->num_alloc_vport; i++) {
                        ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
                        if (ret)
                                return ret;

                        vport++;
                }
        }

        return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
        int ret;

        /* Cfg tm mapping */
        ret = hclge_tm_map_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg tm shaper */
        ret = hclge_tm_shaper_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg dwrr */
        ret = hclge_tm_dwrr_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg schd mode for each level schd */
        return hclge_tm_schd_mode_hw(hdev);
}

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
        bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC;
        int ret;
        u8 i;

        ret = hclge_mac_pause_en_cfg(hdev, en, en);
        if (ret)
                return ret;

        /* Only DCB-supported devices support the qset back pressure setting */
        if (!hnae3_dev_dcb_supported(hdev))
                return 0;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_tm_qs_bp_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return hclge_up_to_tc_map(hdev);
}
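
/* Editorial note (not in the original source): en above is true unless the
 * flow-control mode is PFC, so link-level MAC pause is enabled exactly when
 * per-priority PFC is not in use; the two are mutually exclusive here.
 */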

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
        int ret;

        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
                return -ENOTSUPP;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev);
        if (ret)
                return ret;

        return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
        int ret = hclge_tm_schd_info_init(hdev);

        if (ret)
                return ret;

        return hclge_tm_init_hw(hdev);
}