/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,

	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};
#define HCLGE_SHAPER_BS_U_DEF	1
#define HCLGE_SHAPER_BS_S_DEF	4

#define HCLGE_ETHER_MAX_RATE	100000
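/* HCLGE_ETHER_MAX_RATE is in Mbps, i.e. 100000 Mbps = 100 Gbps; it is the
 * upper bound that hclge_shaper_para_calc() below accepts for @ir.
 */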
/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
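/* A worked example of the formula above (values chosen for illustration,
 * not taken from hardware documentation): at port level Tick = 6 * 8 = 48,
 * so for ir = 50000 Mbps the routine below ends with IR_b = 150, IR_u = 1,
 * IR_s = 0, and indeed 150 * (2 ^ 1) * 8 / (48 * (2 ^ 0)) * 1000 = 50000.
 */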
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > HCLGE_ETHER_MAX_RATE)
		return -EINVAL;

	tick = tick_array[shaper_level];
	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
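	/* Hence the constant below: 1008000 = 126 (the reference IR_B) *
	 * 8 (bits per byte) * 1000 (the 1000 Mbps clock); the
	 * (tick >> 1) - 1 term rounds the integer division.
	 */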
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = 8000 * (1 << --ir_u_calc);

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];
	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes; the first byte covers
	 * priority 0 and priority 1: the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
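	/* For example, with prio_tc = {0, 1, 2, 3, 0, 0, 0, 0}: pri_id 2
	 * puts tc 2 in the low nibble of pri[1] and pri_id 3 puts tc 3 in
	 * the high nibble, so pri[1] = 0x32.
	 */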
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
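	/* The link-valid mask is OR'ed straight into the qset id field, so a
	 * single write both selects the target qset and marks the link valid.
	 */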
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and tc map one to one */
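	/* e.g. tc = 2 yields qs_bit_map = BIT(2), so back pressure for TC 2
	 * is tied to qset 2 only
	 */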
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc =
		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max,
			kinfo->num_tqps / kinfo->num_tc);
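	/* e.g. with num_tqps = 16, num_tc = 4 and rss_size_max = 16, this
	 * gives rss_size = min(16, 16 / 4) = 4 queues per enabled TC
	 */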
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
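	/* e.g. num_tc = 4 leaves prio_tc = {0, 1, 2, 3, 0, 0, 0, 0}: user
	 * priorities beyond the active TCs fall back to TC 0
	 */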

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}
static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hdev->tm_info.fc_mode = HCLGE_FC_NONE;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	return 0;
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_b, ir_u, ir_s;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
			hdev->tm_info.pg_info[i].bw_limit,
			HCLGE_SHAPER_LVL_PG,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
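		/* Note how the C bucket below is programmed with a zero rate
		 * while the calculated IR goes to the P bucket; presumably a
		 * committed/peak pair, with only the peak rate enforced here.
		 */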
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i);
			if (ret)
				return ret;
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
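		/* e.g. with two vports: vport 0's qsets 0..7 feed pri 0 and
		 * vport 1's qsets 8..15 feed pri 1 (assuming qs_offset = 8)
		 */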
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_b, ir_u, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
			hdev->tm_info.tc_info[i].bw_limit,
			HCLGE_SHAPER_LVL_PRI,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info *v_tc_info;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		ret = hclge_shaper_para_calc(
			hdev->tm_info.tc_info[i].bw_limit,
			HCLGE_SHAPER_LVL_QSET,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			ret = hclge_tm_qs_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}
int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC;
	int ret;
	u8 i;

	ret = hclge_mac_pause_en_cfg(hdev, en, en);
	if (ret)
		return ret;

	/* Only DCB-capable devices support qset back pressure config */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return hclge_up_to_tc_map(hdev);
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret = hclge_tm_schd_info_init(hdev);

	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}
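/* A sketch of the bring-up order as wired above (derived from this file,
 * not from any external document):
 *
 *   hclge_tm_schd_init()
 *     hclge_tm_schd_info_init()     SW state: PG / TC / vport info
 *     hclge_tm_init_hw()
 *       hclge_tm_schd_setup_hw()    map cfg -> shaper cfg -> dwrr cfg
 *                                   -> schd mode per level
 *       hclge_pause_setup_hw()      MAC pause, qset back pressure,
 *                                   prio -> TC map
 */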