1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include "hclge_main.h"
/* hclge_ieee_ets_to_tm_info - translate an IEEE 802.1Qaz ETS request from
 * dcbnl into the driver's internal traffic-manager state (hdev->tm_info).
 * Per TC: STRICT selects SP scheduling and forces the DWRR weight to 0;
 * ETS selects DWRR scheduling with the requested bandwidth share; any other
 * TSA value is rejected (hardware implements only SP and ETS).
 * Returns the result of updating the priority->TC mapping table.
 * NOTE(review): this view of the file is elided; the assignments/breaks and
 * the error-return path between the visible lines are not shown here.
 */
10 static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
15 for (i = 0; i < HNAE3_MAX_TC; i++) {
16 switch (ets->tc_tsa[i]) {
17 case IEEE_8021QAZ_TSA_STRICT:
18 hdev->tm_info.tc_info[i].tc_sch_mode =
/* strict-priority TCs consume no DWRR bandwidth share */
20 hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
22 case IEEE_8021QAZ_TSA_ETS:
23 hdev->tm_info.tc_info[i].tc_sch_mode =
25 hdev->tm_info.pg_info[0].tc_dwrr[i] =
29 /* Hardware only supports SP (strict priority)
30 * or ETS (enhanced transmission selection)
31 * algorithms, if we receive some other value
32 * from dcbnl, then throw an error.
/* finally program the user-priority -> TC mapping from the request */
38 return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
/* hclge_tm_info_to_ieee_ets - fill an ieee_ets structure (for dcbnl "get")
 * from the driver's current traffic-manager state: capability, per-priority
 * TC mapping, per-TC DWRR bandwidth, and the TSA (SP vs ETS) derived from
 * each TC's scheduling mode. The struct is zeroed first so unset fields
 * read back as 0.
 */
41 static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
46 memset(ets, 0, sizeof(*ets));
/* advertise the maximum number of TCs the device supports */
48 ets->ets_cap = hdev->tc_max;
50 for (i = 0; i < HNAE3_MAX_TC; i++) {
51 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
52 ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
/* map internal scheduling mode back to the IEEE TSA encoding */
54 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
56 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
58 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
/* hclge_ieee_getets - dcbnl .ieee_getets callback: report the current ETS
 * configuration by copying the driver's tm_info into @ets.
 * NOTE(review): the return statement is outside this elided view;
 * presumably returns 0 — confirm against the full file.
 */
63 static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
65 struct hclge_vport *vport = hclge_get_vport(h);
66 struct hclge_dev *hdev = vport->back;
68 hclge_tm_info_to_ieee_ets(hdev, ets);
/* hclge_ets_validate - sanity-check an ETS request before applying it.
 * Verifies every priority maps to a TC below the device limit, that each
 * TC's requested TSA is SP or ETS, that no ETS TC asks for 0%% bandwidth
 * (hardware silently falls back to SP on 0), and that the ETS bandwidths
 * sum to exactly 100%%. On success *tc receives the number of TCs implied
 * by the mapping and *changed reports whether the config differs from the
 * currently programmed one.
 * NOTE(review): elided view — the *changed assignments, break statements
 * and the derivation of *tc from max_tc are between the visible lines.
 */
73 static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
74 u8 *tc, bool *changed)
76 bool has_ets_tc = false;
81 for (i = 0; i < HNAE3_MAX_TC; i++) {
/* reject priorities mapped to a TC the device cannot provide */
82 if (ets->prio_tc[i] >= hdev->tc_max ||
86 if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
/* track the highest TC referenced; determines the new TC count */
89 if (ets->prio_tc[i] > max_tc)
90 max_tc = ets->prio_tc[i];
92 switch (ets->tc_tsa[i]) {
93 case IEEE_8021QAZ_TSA_STRICT:
94 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
98 case IEEE_8021QAZ_TSA_ETS:
99 /* The hardware will switch to sp mode if bandwidth is
100 * 0, so limit ets bandwidth must be greater than 0.
102 if (!ets->tc_tx_bw[i]) {
103 dev_err(&hdev->pdev->dev,
104 "tc%u ets bw cannot be 0\n", i);
108 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
112 total_ets_bw += ets->tc_tx_bw[i];
/* if any TC uses ETS, the shares must add up to exactly 100%% */
120 if (has_ets_tc && total_ets_bw != BW_PERCENT)
124 if (*tc != hdev->tm_info.num_tc)
/* hclge_map_update - dcbnl .map_update callback: reprogram the hardware
 * after a TC-map change. Sequence: TM queue/TC mapping, scheduler mode,
 * pause (PFC/MAC pause) setup, buffer reallocation, then RSS indirection
 * re-init and RSS hardware re-init.
 * NOTE(review): elided view — the "if (ret) return ret;" error checks
 * between the visible calls are not shown.
 */
130 static int hclge_map_update(struct hnae3_handle *h)
132 struct hclge_vport *vport = hclge_get_vport(h);
133 struct hclge_dev *hdev = vport->back;
136 ret = hclge_tm_map_cfg(hdev);
140 ret = hclge_tm_schd_mode_hw(hdev);
144 ret = hclge_pause_setup_hw(hdev);
148 ret = hclge_buffer_alloc(hdev);
/* queue count may have changed; rebuild the RSS indirection table */
152 hclge_rss_indir_init_cfg(hdev);
154 return hclge_rss_init_hw(hdev);
/* hclge_client_setup_tc - notify every attached client (NIC handle per
 * vport, including VMDq vports) of the new TC count via its setup_tc op.
 * Clients without a setup_tc op are skipped.
 * NOTE(review): elided view — the error handling after setup_tc and the
 * final return are not shown.
 */
157 static int hclge_client_setup_tc(struct hclge_dev *hdev)
159 struct hclge_vport *vport = hdev->vport;
160 struct hnae3_client *client;
161 struct hnae3_handle *handle;
/* vport[0] is the PF; the remaining entries are VMDq vports */
165 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
166 handle = &vport[i].nic;
167 client = handle->client;
/* skip handles with no client bound or no setup_tc support */
169 if (!client || !client->ops || !client->ops->setup_tc)
172 ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
/* hclge_ieee_setets - dcbnl .ieee_setets callback: validate and apply a
 * new ETS configuration. Refused when IEEE DCBX is not enabled or mqprio
 * offload is active (the two are mutually exclusive). On a map change the
 * scheduler info is updated, the request is converted into tm_info,
 * clients are re-notified of the TC count, and DWRR weights are written.
 * NOTE(review): elided view — the error checks and the map_changed branch
 * structure between the visible lines are not shown.
 */
180 static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
182 struct hclge_vport *vport = hclge_get_vport(h);
183 struct hclge_dev *hdev = vport->back;
184 bool map_changed = false;
/* ETS via dcbnl conflicts with mqprio hardware offload */
188 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
189 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
192 ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
196 hclge_tm_schd_info_update(hdev, num_tc);
198 ret = hclge_ieee_ets_to_tm_info(hdev, ets);
203 ret = hclge_client_setup_tc(hdev);
208 return hclge_tm_dwrr_cfg(hdev);
/* hclge_ieee_getpfc - dcbnl .ieee_getpfc callback: report PFC capability,
 * the enabled-priority bitmap, and per-TC PFC frame counters (tx requests
 * and rx indications) read from hardware statistics.
 * NOTE(review): elided view — error checks after the stats reads and the
 * final return are not shown.
 */
211 static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
213 u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
214 struct hclge_vport *vport = hclge_get_vport(h);
215 struct hclge_dev *hdev = vport->back;
219 memset(pfc, 0, sizeof(*pfc));
220 pfc->pfc_cap = hdev->pfc_max;
221 pfc->pfc_en = hdev->tm_info.pfc_en;
/* fetch per-TC pause-frame counters from the device */
223 ret = hclge_pfc_tx_stats_get(hdev, requests);
227 ret = hclge_pfc_rx_stats_get(hdev, indications);
231 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
232 pfc->requests[i] = requests[i];
233 pfc->indications[i] = indications[i];
/* hclge_ieee_setpfc - dcbnl .ieee_setpfc callback: apply a new PFC
 * enable bitmap. Refused when IEEE DCBX is off or mqprio offload is on;
 * a no-op when the bitmap is unchanged. Builds a per-TC hardware PFC map
 * by setting a TC's bit when any user priority mapped to that TC has PFC
 * enabled, then reprograms pause settings in hardware.
 * NOTE(review): elided view — the pfc_map bit-set statement inside the
 * nested loop and the early returns are not shown.
 */
238 static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
240 struct hclge_vport *vport = hclge_get_vport(h);
241 struct hclge_dev *hdev = vport->back;
242 u8 i, j, pfc_map, *prio_tc;
244 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
245 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
/* nothing to do if the requested bitmap matches the current one */
248 if (pfc->pfc_en == hdev->tm_info.pfc_en)
251 prio_tc = hdev->tm_info.prio_tc;
/* translate the per-priority enable bits into a per-TC map */
254 for (i = 0; i < hdev->tm_info.num_tc; i++) {
255 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
256 if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
263 hdev->tm_info.hw_pfc_map = pfc_map;
264 hdev->tm_info.pfc_en = pfc->pfc_en;
266 return hclge_pause_setup_hw(hdev);
269 /* DCBX configuration */
/* hclge_getdcbx - dcbnl .getdcbx callback: report the DCBX capability
 * flags. NOTE(review): the value returned when mqprio offload is active
 * is outside this elided view — presumably 0, meaning DCBX unavailable;
 * confirm against the full file.
 */
270 static u8 hclge_getdcbx(struct hnae3_handle *h)
272 struct hclge_vport *vport = hclge_get_vport(h);
273 struct hclge_dev *hdev = vport->back;
275 if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
278 return hdev->dcbx_cap;
/* hclge_setdcbx - dcbnl .setdcbx callback: accept a new DCBX mode.
 * Only host-managed (non-LLD, non-CEE) modes are allowed; the accepted
 * mode is stored in hdev->dcbx_cap.
 * NOTE(review): elided view — the rejection return value and the success
 * return are not shown (dcbnl convention: non-zero means rejected).
 */
281 static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
283 struct hclge_vport *vport = hclge_get_vport(h);
284 struct hclge_dev *hdev = vport->back;
286 /* No support for LLD_MANAGED modes or CEE */
287 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
288 (mode & DCB_CAP_DCBX_VER_CEE) ||
289 !(mode & DCB_CAP_DCBX_HOST))
292 hdev->dcbx_cap = mode;
297 /* Set up TC for hardware offloaded mqprio in channel mode */
/* hclge_setup_tc - apply an mqprio TC count and prio->TC map.
 * Refused while DCB (ETS via dcbnl) is enabled; the TC count is bounded
 * by the device's tc_max. Updates scheduler info, the prio->TC table and
 * the TM hardware, clears the DCB flag, and sets or clears the MQPRIO
 * flag depending on whether tc is non-zero (branch structure elided).
 * NOTE(review): elided view — the error checks after each call and the
 * final return are not shown.
 */
298 static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
300 struct hclge_vport *vport = hclge_get_vport(h);
301 struct hclge_dev *hdev = vport->back;
/* mqprio offload and dcbnl-driven DCB are mutually exclusive */
304 if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
307 if (tc > hdev->tc_max) {
308 dev_err(&hdev->pdev->dev,
309 "setup tc failed, tc(%u) > tc_max(%u)\n",
314 hclge_tm_schd_info_update(hdev, tc);
316 ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
320 ret = hclge_tm_init_hw(hdev);
324 hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
327 hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
329 hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
/* dcbnl operations table exported to the hnae3 framework; installed by
 * hclge_dcb_ops_set() on the PF. (Closing brace is outside this view.)
 */
334 static const struct hnae3_dcb_ops hns3_dcb_ops = {
335 .ieee_getets = hclge_ieee_getets,
336 .ieee_setets = hclge_ieee_setets,
337 .ieee_getpfc = hclge_ieee_getpfc,
338 .ieee_setpfc = hclge_ieee_setpfc,
339 .getdcbx = hclge_getdcbx,
340 .setdcbx = hclge_setdcbx,
341 .map_update = hclge_map_update,
342 .setup_tc = hclge_setup_tc,
/* hclge_dcb_ops_set - install the DCB ops on the PF's NIC handle and
 * default the DCBX capability to IEEE + host-managed. Skipped when the
 * device lacks DCB support or the vport is not the PF (vport_id != 0).
 */
345 void hclge_dcb_ops_set(struct hclge_dev *hdev)
347 struct hclge_vport *vport = hdev->vport;
348 struct hnae3_knic_private_info *kinfo;
350 /* Hdev does not support DCB or vport is
351 * not a pf, then dcb_ops is not set.
353 if (!hnae3_dev_dcb_supported(hdev) ||
354 vport->vport_id != 0)
357 kinfo = &vport->nic.kinfo;
358 kinfo->dcb_ops = &hns3_dcb_ops;
359 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;