// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};
static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};
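
/* The 64-bit statistics are fetched with a chain of HCLGE_64_BIT_CMD_NUM
 * command descriptors. Only the first descriptor keeps its command
 * header, so it contributes HCLGE_64_BIT_RTN_DATANUM - 1 counters from
 * its data area; the firmware apparently packs each follow-up descriptor
 * entirely with counter data, which is why those are read from the
 * descriptor base. Values are accumulated into hdev->hw_stats, on the
 * assumption that the hardware counters are cleared on read.
 */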
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

/* The buffer-occupancy counters are snapshots rather than deltas, so
 * they are zeroed before the accumulation pass in
 * hclge_32_bit_update_stats() and thus always hold the latest value.
 */
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}
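
/* The 32-bit statistics command mixes field widths in its first
 * descriptor: one 32-bit IGU error counter, then two packed 16-bit
 * counters, after which the remaining four words (and all follow-up
 * descriptors) are plain 32-bit counters accumulated field by field
 * starting at egu_tx_1588_pkt.
 */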
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
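
/* MAC statistics use the same descriptor-chain layout as the 64-bit
 * query above: the first descriptor contributes HCLGE_RTN_DATA_NUM - 2
 * counters (its trailing words are apparently reserved), the rest
 * contribute HCLGE_RTN_DATA_NUM each, accumulated in table order into
 * hw_stats.mac_stats.
 */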
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}
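
/* Copy the per-queue counters out in the same order as the strings
 * produced by hclge_tqps_get_strings(): all TX queues first, then all
 * RX queues.
 */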
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each TQP contributes one TX and one RX packet counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
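
/* Fold the hardware counters into the standard netdev stats. Several
 * net_device_stats fields are sums of more than one hardware counter,
 * e.g. rx_errors combines oversize, undersize, missing-SOF/EOF and FCS
 * error counts.
 */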
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	/* the STATISTICS_UPDATING bit serializes concurrent updaters */
	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: supported by all MAC modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}

		count++;
		handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}
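
/* Poll the firmware until the PF state reports the function reset as
 * done (or about 5 retries have elapsed), then record whether this PF
 * is the main PF.
 */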
static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and RoCE vectors, and the
		 * NIC vectors are queued before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	/* this decodes the same firmware speed enumeration that
	 * hclge_cfg_mac_speed_dup() programs on the config path
	 */
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* combine the high 16 bits (bits 47:32) with the low 32 bits */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length is expressed in units of 4 bytes when sent
		 * to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n",
			ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* non-contiguous TCs are not supported yet */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}
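
/* Program the TSO MSS bounds into the firmware. Note both bounds are
 * written through the HCLGE_TSO_MSS_MIN_M/_S field macros; the min and
 * max fields apparently share the same bit layout.
 */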
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	/* hand out the first free TQPs until the vport has its share */
	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport; the main vport
	 * additionally takes the remainder.
	 */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
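
/* Check whether the remaining packet buffer can hold the private
 * buffers plus a large enough shared buffer. The shared-buffer
 * requirement is the larger of a fixed floor (2 * MPS plus the DV
 * headroom) and a per-TC estimate that reserves a full MPS for each
 * PFC-enabled TC and half an MPS for the others. On success the shared
 * buffer and its per-TC thresholds are recorded in buf_alloc.
 */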
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT	128
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high = priv->wl.low + aligned_mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TCs with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);

	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}
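
/* Allocate MSI/MSI-X vectors for the PF. Getting fewer vectors than
 * requested is tolerated with a warning; the bookkeeping arrays
 * (vector_status, vector_irq) are sized to what was actually granted.
 */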
static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	/* only 10M and 100M links can be half duplex */
	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}
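
/* The firmware speed field is an enumeration, not Mbps: 0 = 1G,
 * 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M.
 * hclge_parse_speed() decodes the same values on the query path.
 */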
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);

	return ret;
}
2187 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2189 struct hclge_config_auto_neg_cmd *req;
2190 struct hclge_desc desc;
2194 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2196 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2197 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2198 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2200 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2208 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2210 struct hclge_vport *vport = hclge_get_vport(handle);
2211 struct hclge_dev *hdev = vport->back;
2213 return hclge_set_autoneg_en(hdev, enable);
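/* When an external PHY is attached its autoneg state is authoritative;
 * otherwise fall back to the MAC's cached autoneg setting.
 */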
static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}

static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
					   bool mask_vlan,
					   u8 *mac_mask)
{
	struct hclge_mac_vlan_mask_entry_cmd *req;
	struct hclge_desc desc;
	int status;

	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);

	hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
		      mask_vlan ? 1 : 0);
	ether_addr_copy(req->mac_mask, mac_mask);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
			status);

	return status;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct net_device *netdev = handle->kinfo.netdev;
	struct hclge_mac *mac = &hdev->hw.mac;
	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct hclge_vport *vport;
	int mtu;
	int ret;
	int i;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	/* Initialize the MTA table work mode */
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		vport->accept_mta_mc = false;

		memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
		ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set mta filter mode fail ret=%d\n", ret);
			return ret;
		}
	}

	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"set default mac_vlan_mask fail ret=%d\n", ret);
		return ret;
	}

	if (netdev)
		mtu = netdev->mtu;
	else
		mtu = ETH_DATA_LEN;

	ret = hclge_set_mtu(handle, mtu);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"set mtu failed ret=%d\n", ret);

	return ret;
}

static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->mbx_service_task);
}

static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS_UP_M;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
		return 0;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when no phy exists
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

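/* Decode the vector0 interrupt source registers into a single event type.
 * Reset sources are checked first and reported in order of severity
 * (global, then core, then IMP); the bits to be cleared are handed back to
 * the caller through @clearval.
 */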
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this go and
	 * will defer the processing of the mailbox events. Since we would not
	 * have cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

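/* Vector 0 is shared by reset and mailbox events. The vector is masked on
 * entry and re-enabled here only for mailbox events; for reset events it is
 * re-enabled later, once hclge_clear_reset_cause() has cleared the cause.
 */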
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}

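/* Poll the reset status register for the current reset type until the
 * hardware reports completion, checking every 100 ms for up to 5 tries
 * (see HCLGE_RESET_WATI_MS / HCLGE_RESET_WAIT_CNT below).
 */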
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WATI_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

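/* Perform the actual reset for the client: bring the stack down, wait for
 * the hardware to finish resetting, re-initialize the ae device, and bring
 * the stack back up. If the hardware did not finish in time the reset is
 * re-queued and retried from the reset service task.
 */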
static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;

	/* perform reset of the stack & ae device for a client */
	handle = &hdev->vport[0].nic;
	rtnl_lock();
	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);

		hclge_clear_reset_cause(hdev);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	handle->last_reset_time = jiffies;
	rtnl_unlock();
}

static void hclge_reset_event(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, so let us check after
	 * a sufficiently large time, say 4*5*HZ). In case of a new request
	 * we reset the "reset level" to PF reset. And if it is a repeat of
	 * the most recent reset request then we want to make sure we
	 * throttle it, so we will not allow it again before 3*HZ has passed.
	 */
	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
		return;
	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
		handle->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 handle->reset_level);

	/* request reset & schedule reset task */
	set_bit(handle->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (handle->reset_level < HNAE3_GLOBAL_RESET)
		handle->reset_level++;
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_service_complete(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

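/* The RSS hash key does not fit in a single descriptor, so it is written
 * with three RSS_GENERIC_CONFIG commands; key_offset selects which slice of
 * the key each command carries, and the last slice holds the remainder.
 */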
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = vport->rss_algo;

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;

	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

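/* Program the RSS configuration cached in vport[0] (indirection table, hash
 * key, input tuples) into the hardware, then set up the per-TC RSS mode.
 */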
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the rounded-up power of two of rss_size; the actual
	 * queue size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

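/* Map (or unmap, when @en is false) the rings in @ring_chain onto the given
 * vector. A single command descriptor carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so long chains are flushed to
 * the firmware in batches and the remainder is sent at the end.
 */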
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}

int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision 0x20; newer revisions support them. Setting these
	 * two fields does not cause an error when the driver sends this
	 * command to revision 0x20 firmware.
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				   bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
				 vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
{
#define HCLGE_SERDES_RETRY_MS	10
#define HCLGE_SERDES_RETRY_NUM	100
	struct hclge_serdes_lb_cmd *req;
	struct hclge_desc desc;
	int ret, i = 0;

	req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);

	if (en) {
		req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
		req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	} else {
		req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"serdes loopback set fail, ret = %d\n", ret);
		return ret;
	}

	do {
		msleep(HCLGE_SERDES_RETRY_MS);
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"serdes loopback get, ret = %d\n", ret);
			return ret;
		}
	} while (++i < HCLGE_SERDES_RETRY_NUM &&
		 !(req->result & HCLGE_CMD_SERDES_DONE_B));

	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
		return -EBUSY;
	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
		return -EIO;
	}

	hclge_cfg_mac_mode(hdev, en);
	return 0;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		ret = hclge_set_mac_loopback(hdev, en);
		break;
	case HNAE3_MAC_INTER_LOOP_SERDES:
		ret = hclge_set_serdes_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	for (i = 0; i < vport->alloc_tqps; i++) {
		ret = hclge_tqp_enable(hdev, i, 0, en);
		if (ret)
			return ret;
	}

	return 0;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, true);

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	hclge_mac_start_phy(hdev);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	hclge_update_link_status(hdev);
}

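/* Translate the MAC/VLAN table response code returned by the firmware into
 * an errno, based on which table operation (@op) produced it: for instance
 * resp_code 2/3 on add means the unicast/multicast table overflowed
 * (-ENOSPC), while resp_code 1 on remove or lookup means a miss (-ENOENT).
 */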
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}

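/* Each MAC/VLAN table entry carries a bitmap of the functions that use it,
 * spread over the data words of desc[1] (vfid 0-191) and desc[2] (vfid
 * 192-255); set or clear (@clr) the bit belonging to @vfid.
 */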
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	u32 word_num;
	u32 bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}

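/* Derive the MTA table index from the destination MAC address: take the top
 * 16 bits of the address and keep 12 of them, shifted according to the
 * configured mta_mac_sel_type window.
 */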
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		      enable);
	hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
			HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		      enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
			HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	if (enable)
		set_bit(idx, vport->mta_shadow);
	else
		clear_bit(idx, vport->mta_shadow);

	return 0;
}

static int hclge_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct net_device *netdev = handle->kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	memset(mta_status, 0, sizeof(mta_status));

	/* update mta_status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclge_update_mta_status_common(vport, mta_status,
					      0, HCLGE_MTA_TBL_SIZE, true);
}

int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx,
				   u16 count,
				   bool update_filter)
{
	struct hclge_dev *hdev = vport->back;
	u16 update_max = idx + count;
	u16 check_max;
	int ret = 0;
	bool used;
	u16 i;

	/* setup mta check range */
	if (update_filter) {
		i = 0;
		check_max = HCLGE_MTA_TBL_SIZE;
	} else {
		i = idx;
		check_max = update_max;
	}

	used = false;
	/* check and update all mta item */
	for (; i < check_max; i++) {
		/* ignore unused item */
		if (!test_bit(i, vport->mta_shadow))
			continue;

		/* if i is in the update range then update it */
		if (i >= idx && i < update_max)
			if (!test_bit(i - idx, status))
				hclge_set_mta_table_item(vport, i, false);

		if (!used && test_bit(i, vport->mta_shadow))
			used = true;
	}

	/* no longer using mta, disable it */
	if (vport->accept_mta_mc && update_filter && !used) {
		ret = hclge_cfg_func_mta_filter(hdev,
						vport->vport_id,
						false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"disable func mta filter fail ret=%d\n",
				ret);
			return ret;
		}

		vport->accept_mta_mc = false;
	}

	return ret;
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr,
			is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. A repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT)
		return hclge_add_mac_vlan_tbl(vport, &req, NULL);

	/* check if we just hit the duplicate */
	if (!ret)
		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
			 vport->vport_id, addr);

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return -EIO;
}

static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);

	return ret;
}

static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* If mc mac vlan table is full, use MTA table */
	if (status == -ENOSPC) {
		if (!vport->accept_mta_mc) {
			status = hclge_cfg_func_mta_filter(hdev,
							   vport->vport_id,
							   true);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"set mta filter mode fail ret=%d\n",
					status);
				return status;
			}
			vport->accept_mta_mc = true;
		}

		/* Set MTA table for this MAC address */
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
		status = hclge_set_mta_table_item(vport, tbl_idx, true);
	}

	return status;
}

4425 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
4426 const unsigned char *addr)
4428 struct hclge_vport *vport = hclge_get_vport(handle);
4430 return hclge_rm_mc_addr_common(vport, addr);
4433 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
4434 const unsigned char *addr)
4436 struct hclge_dev *hdev = vport->back;
4437 struct hclge_mac_vlan_tbl_entry_cmd req;
4438 enum hclge_cmd_status status;
4439 struct hclge_desc desc[3];
4441 /* mac addr check */
4442 if (!is_multicast_ether_addr(addr)) {
4443 dev_dbg(&hdev->pdev->dev,
4444 "Remove mc mac err! invalid mac:%pM.\n",
4449 memset(&req, 0, sizeof(req));
4450 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4451 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4452 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4453 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4454 hclge_prepare_mac_addr(&req, addr);
4455 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4457 /* This mac addr exists, remove this handle's VFID for it */
4458 hclge_update_desc_vfid(desc, vport->vport_id, true);
4460 if (hclge_is_all_function_id_zero(desc))
4461 /* All the vfids are zero, so delete this entry */
4462 status = hclge_remove_mac_vlan_tbl(vport, &req);
4464 /* Not all the vfids are zero, just update the vfid bitmap */
4465 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4468 /* This mac address may be in the mta table, but it cannot be
4469 * deleted here because an mta entry represents an address
4470 * range rather than a specific address. The delete action for
4471 * all entries will take effect in update_mta_status, called
4472 * by hns3_nic_set_rx_mode.
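/* Editor's note on hclge_get_mac_ethertype_cmd_status() below: it
 * translates the firmware's response to a MAC ethertype manager-table
 * command into an errno. Success and "already added" are both treated
 * as 0, while table overflow, key conflict and unknown codes map to
 * -EIO after logging.
 */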
4480 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
4481 u16 cmdq_resp, u8 resp_code)
4483 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
4484 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
4485 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
4486 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
4491 dev_err(&hdev->pdev->dev,
4492 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
4497 switch (resp_code) {
4498 case HCLGE_ETHERTYPE_SUCCESS_ADD:
4499 case HCLGE_ETHERTYPE_ALREADY_ADD:
4502 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
4503 dev_err(&hdev->pdev->dev,
4504 "add mac ethertype failed for manager table overflow.\n");
4505 return_status = -EIO;
4507 case HCLGE_ETHERTYPE_KEY_CONFLICT:
4508 dev_err(&hdev->pdev->dev,
4509 "add mac ethertype failed for key conflict.\n");
4510 return_status = -EIO;
4513 dev_err(&hdev->pdev->dev,
4514 "add mac ethertype failed for undefined, code=%d.\n",
4516 return_status = -EIO;
4519 return return_status;
4522 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
4523 const struct hclge_mac_mgr_tbl_entry_cmd *req)
4525 struct hclge_desc desc;
4530 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
4531 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
4533 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4535 dev_err(&hdev->pdev->dev,
4536 "add mac ethertype failed for cmd_send, ret =%d.\n",
4541 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4542 retval = le16_to_cpu(desc.retval);
4544 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
4547 static int init_mgr_tbl(struct hclge_dev *hdev)
4552 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
4553 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
4555 dev_err(&hdev->pdev->dev,
4556 "add mac ethertype failed, ret =%d.\n",
4565 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4567 struct hclge_vport *vport = hclge_get_vport(handle);
4568 struct hclge_dev *hdev = vport->back;
4570 ether_addr_copy(p, hdev->hw.mac.mac_addr);
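/* Editor's note on hclge_set_mac_addr() below: it changes the PF's
 * unicast MAC address by validating the new address, removing the old
 * entry (skipped on first configuration), adding the new one (and
 * attempting to restore the old entry on failure), updating the MAC
 * pause address, and finally committing the new address to hdev.
 */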
4573 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
4576 const unsigned char *new_addr = (const unsigned char *)p;
4577 struct hclge_vport *vport = hclge_get_vport(handle);
4578 struct hclge_dev *hdev = vport->back;
4581 /* mac addr check */
4582 if (is_zero_ether_addr(new_addr) ||
4583 is_broadcast_ether_addr(new_addr) ||
4584 is_multicast_ether_addr(new_addr)) {
4585 dev_err(&hdev->pdev->dev,
4586 "Change uc mac err! invalid mac:%p.\n",
4591 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
4592 dev_warn(&hdev->pdev->dev,
4593 "remove old uc mac address fail.\n");
4595 ret = hclge_add_uc_addr(handle, new_addr);
4597 dev_err(&hdev->pdev->dev,
4598 "add uc mac address fail, ret =%d.\n",
4602 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
4603 dev_err(&hdev->pdev->dev,
4604 "restore uc mac address fail.\n");
4609 ret = hclge_pause_addr_cfg(hdev, new_addr);
4611 dev_err(&hdev->pdev->dev,
4612 "configure mac pause address fail, ret =%d.\n",
4617 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4622 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4625 struct hclge_vlan_filter_ctrl_cmd *req;
4626 struct hclge_desc desc;
4629 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4631 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
4632 req->vlan_type = vlan_type;
4633 req->vlan_fe = filter_en;
4635 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4637 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
4643 #define HCLGE_FILTER_TYPE_VF 0
4644 #define HCLGE_FILTER_TYPE_PORT 1
4646 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
4648 struct hclge_vport *vport = hclge_get_vport(handle);
4649 struct hclge_dev *hdev = vport->back;
4651 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
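/* Editor's note on hclge_set_vf_vlan_common() below: it programs a VLAN
 * filter entry for one VF. The target VF is selected by a single bit in
 * a bitmap spanning two command descriptors of HCLGE_MAX_VF_BYTES (16)
 * bytes each. Illustrative arithmetic for vfid = 130:
 *   vf_byte_off = 130 / 8        = 16  (beyond desc[0], so desc[1] used)
 *   vf_byte_val = 1 << (130 % 8) = 0x04
 *   req1->vf_bitmap[16 - 16]     = 0x04
 * The firmware's resp_code then distinguishes success, a full VF VLAN
 * table on add, and a missing entry on kill.
 */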
4654 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4655 bool is_kill, u16 vlan, u8 qos,
4658 #define HCLGE_MAX_VF_BYTES 16
4659 struct hclge_vlan_filter_vf_cfg_cmd *req0;
4660 struct hclge_vlan_filter_vf_cfg_cmd *req1;
4661 struct hclge_desc desc[2];
4666 hclge_cmd_setup_basic_desc(&desc[0],
4667 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4668 hclge_cmd_setup_basic_desc(&desc[1],
4669 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4671 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4673 vf_byte_off = vfid / 8;
4674 vf_byte_val = 1 << (vfid % 8);
4676 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
4677 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
4679 req0->vlan_id = cpu_to_le16(vlan);
4680 req0->vlan_cfg = is_kill;
4682 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
4683 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
4685 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
4687 ret = hclge_cmd_send(&hdev->hw, desc, 2);
4689 dev_err(&hdev->pdev->dev,
4690 "Send vf vlan command fail, ret =%d.\n",
4696 #define HCLGE_VF_VLAN_NO_ENTRY 2
4697 if (!req0->resp_code || req0->resp_code == 1)
4700 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
4701 dev_warn(&hdev->pdev->dev,
4702 "vf vlan table is full, vf vlan filter is disabled\n");
4706 dev_err(&hdev->pdev->dev,
4707 "Add vf vlan filter fail, ret =%d.\n",
4710 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
4711 if (!req0->resp_code)
4714 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
4715 dev_warn(&hdev->pdev->dev,
4716 "vlan %d filter is not in vf vlan table\n",
4721 dev_err(&hdev->pdev->dev,
4722 "Kill vf vlan filter fail, ret =%d.\n",
4729 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
4730 u16 vlan_id, bool is_kill)
4732 struct hclge_vlan_filter_pf_cfg_cmd *req;
4733 struct hclge_desc desc;
4734 u8 vlan_offset_byte_val;
4735 u8 vlan_offset_byte;
4739 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
4741 vlan_offset_160 = vlan_id / 160;
4742 vlan_offset_byte = (vlan_id % 160) / 8;
4743 vlan_offset_byte_val = 1 << (vlan_id % 8);
4745 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
4746 req->vlan_offset = vlan_offset_160;
4747 req->vlan_cfg = is_kill;
4748 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4750 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4752 dev_err(&hdev->pdev->dev,
4753 "port vlan command, send fail, ret =%d.\n", ret);
4757 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
4758 u16 vport_id, u16 vlan_id, u8 qos,
4761 u16 vport_idx, vport_num = 0;
4764 if (is_kill && !vlan_id)
4767 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
4770 dev_err(&hdev->pdev->dev,
4771 "Set %d vport vlan filter config fail, ret =%d.\n",
4776 /* vlan 0 may be added twice when 8021q module is enabled */
4777 if (!is_kill && !vlan_id &&
4778 test_bit(vport_id, hdev->vlan_table[vlan_id]))
4781 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
4782 dev_err(&hdev->pdev->dev,
4783 "Add port vlan failed, vport %d is already in vlan %d\n",
4789 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
4790 dev_err(&hdev->pdev->dev,
4791 "Delete port vlan failed, vport %d is not in vlan %d\n",
4796 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
4799 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
4800 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
4806 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
4807 u16 vlan_id, bool is_kill)
4809 struct hclge_vport *vport = hclge_get_vport(handle);
4810 struct hclge_dev *hdev = vport->back;
4812 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
4816 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4817 u16 vlan, u8 qos, __be16 proto)
4819 struct hclge_vport *vport = hclge_get_vport(handle);
4820 struct hclge_dev *hdev = vport->back;
4822 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4824 if (proto != htons(ETH_P_8021Q))
4825 return -EPROTONOSUPPORT;
4827 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
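/* Editor's note on hclge_set_vlan_tx_offload_cfg() below: it pushes this
 * vport's TX VLAN offload settings (default tags, accept and insert
 * bits) to hardware. The command addresses the vport via a
 * vf_offset/vf_bitmap pair; assuming HCLGE_VF_NUM_PER_CMD is 64 and
 * HCLGE_VF_NUM_PER_BYTE is 8 (values defined outside this excerpt),
 * vport_id 70 would give vf_offset = 1, bmap_index = 0 and a bitmap
 * value of 1 << (70 % 8) = 0x40.
 */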
4830 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
4832 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
4833 struct hclge_vport_vtag_tx_cfg_cmd *req;
4834 struct hclge_dev *hdev = vport->back;
4835 struct hclge_desc desc;
4839 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
4841 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
4842 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
4843 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
4844 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
4845 vcfg->accept_tag1 ? 1 : 0);
4846 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
4847 vcfg->accept_untag1 ? 1 : 0);
4848 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
4849 vcfg->accept_tag2 ? 1 : 0);
4850 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
4851 vcfg->accept_untag2 ? 1 : 0);
4852 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
4853 vcfg->insert_tag1_en ? 1 : 0);
4854 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
4855 vcfg->insert_tag2_en ? 1 : 0);
4856 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
4858 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4859 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
4860 HCLGE_VF_NUM_PER_BYTE;
4861 req->vf_bitmap[bmap_index] =
4862 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4864 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4866 dev_err(&hdev->pdev->dev,
4867 "Send port txvlan cfg command fail, ret =%d\n",
4873 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
4875 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
4876 struct hclge_vport_vtag_rx_cfg_cmd *req;
4877 struct hclge_dev *hdev = vport->back;
4878 struct hclge_desc desc;
4882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
4884 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
4885 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
4886 vcfg->strip_tag1_en ? 1 : 0);
4887 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
4888 vcfg->strip_tag2_en ? 1 : 0);
4889 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
4890 vcfg->vlan1_vlan_prionly ? 1 : 0);
4891 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
4892 vcfg->vlan2_vlan_prionly ? 1 : 0);
4894 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4895 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
4896 HCLGE_VF_NUM_PER_BYTE;
4897 req->vf_bitmap[bmap_index] =
4898 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4900 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4902 dev_err(&hdev->pdev->dev,
4903 "Send port rxvlan cfg command fail, ret =%d\n",
4909 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
4911 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
4912 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
4913 struct hclge_desc desc;
4916 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
4917 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
4918 rx_req->ot_fst_vlan_type =
4919 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
4920 rx_req->ot_sec_vlan_type =
4921 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
4922 rx_req->in_fst_vlan_type =
4923 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
4924 rx_req->in_sec_vlan_type =
4925 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
4927 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4929 dev_err(&hdev->pdev->dev,
4930 "Send rxvlan protocol type command fail, ret =%d\n",
4935 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
4937 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
4938 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
4939 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
4941 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4943 dev_err(&hdev->pdev->dev,
4944 "Send txvlan protocol type command fail, ret =%d\n",
4950 static int hclge_init_vlan_config(struct hclge_dev *hdev)
4952 #define HCLGE_DEF_VLAN_TYPE 0x8100
4954 struct hnae3_handle *handle;
4955 struct hclge_vport *vport;
4959 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
4963 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
4967 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4968 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4969 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4970 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4971 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
4972 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
4974 ret = hclge_set_vlan_protocol_type(hdev);
4978 for (i = 0; i < hdev->num_alloc_vport; i++) {
4979 vport = &hdev->vport[i];
4980 vport->txvlan_cfg.accept_tag1 = true;
4981 vport->txvlan_cfg.accept_untag1 = true;
4983 /* accept_tag2 and accept_untag2 are not supported on
4984 * pdev revision(0x20); newer revisions support them. The
4985 * firmware on revision(0x20) does not return an error when
4986 * the driver sends these two fields in a command.
4987 * These two fields cannot be configured by the user.
4989 vport->txvlan_cfg.accept_tag2 = true;
4990 vport->txvlan_cfg.accept_untag2 = true;
4992 vport->txvlan_cfg.insert_tag1_en = false;
4993 vport->txvlan_cfg.insert_tag2_en = false;
4994 vport->txvlan_cfg.default_tag1 = 0;
4995 vport->txvlan_cfg.default_tag2 = 0;
4997 ret = hclge_set_vlan_tx_offload_cfg(vport);
5001 vport->rxvlan_cfg.strip_tag1_en = false;
5002 vport->rxvlan_cfg.strip_tag2_en = true;
5003 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
5004 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
5006 ret = hclge_set_vlan_rx_offload_cfg(vport);
5011 handle = &hdev->vport[0].nic;
5012 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
5015 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
5017 struct hclge_vport *vport = hclge_get_vport(handle);
5019 vport->rxvlan_cfg.strip_tag1_en = false;
5020 vport->rxvlan_cfg.strip_tag2_en = enable;
5021 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
5022 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
5024 return hclge_set_vlan_rx_offload_cfg(vport);
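/* Editor's note on hclge_set_mac_mtu() below: it converts an MTU into a
 * maximum frame size and programs it into the MAC. Illustratively,
 * new_mtu = 1500 gives
 *   max_frm_size = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4)
 *                = 1522
 * which is range-checked against the MAC's minimum and maximum frame
 * sizes and then bounded below by HCLGE_MAC_DEFAULT_FRAME.
 */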
5027 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
5029 struct hclge_config_max_frm_size_cmd *req;
5030 struct hclge_desc desc;
5034 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5036 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
5037 max_frm_size > HCLGE_MAC_MAX_FRAME)
5040 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
5042 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
5044 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
5045 req->max_frm_size = cpu_to_le16(max_frm_size);
5046 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
5048 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5050 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
5052 hdev->mps = max_frm_size;
5057 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
5059 struct hclge_vport *vport = hclge_get_vport(handle);
5060 struct hclge_dev *hdev = vport->back;
5063 ret = hclge_set_mac_mtu(hdev, new_mtu);
5065 dev_err(&hdev->pdev->dev,
5066 "Change mtu fail, ret =%d\n", ret);
5070 ret = hclge_buffer_alloc(hdev);
5072 dev_err(&hdev->pdev->dev,
5073 "Allocate buffer fail, ret =%d\n", ret);
5078 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
5081 struct hclge_reset_tqp_queue_cmd *req;
5082 struct hclge_desc desc;
5085 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
5087 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
5088 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5089 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
5091 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5093 dev_err(&hdev->pdev->dev,
5094 "Send tqp reset cmd error, status =%d\n", ret);
5101 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
5103 struct hclge_reset_tqp_queue_cmd *req;
5104 struct hclge_desc desc;
5107 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
5109 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
5110 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5112 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5114 dev_err(&hdev->pdev->dev,
5115 "Get reset status error, status =%d\n", ret);
5119 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
5122 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
5125 struct hnae3_queue *queue;
5126 struct hclge_tqp *tqp;
5128 queue = handle->kinfo.tqp[queue_id];
5129 tqp = container_of(queue, struct hclge_tqp, q);
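/* Editor's note on hclge_reset_tqp() below: it resets one TQP from the
 * PF side. The sequence is: skip if a device-level reset is already in
 * progress, disable the queue, assert the reset through the command
 * queue, poll the ready bit up to HCLGE_TQP_RESET_TRY_TIMES times, then
 * deassert the soft reset. hclge_reset_vf_queue() further below follows
 * the same pattern for queues reset on behalf of a VF, minus the
 * disable step.
 */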
5134 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
5136 struct hclge_vport *vport = hclge_get_vport(handle);
5137 struct hclge_dev *hdev = vport->back;
5138 int reset_try_times = 0;
5143 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5146 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
5148 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
5150 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
5154 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5156 dev_warn(&hdev->pdev->dev,
5157 "Send reset tqp cmd fail, ret = %d\n", ret);
5161 reset_try_times = 0;
5162 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5163 /* Wait for tqp hw reset */
5165 reset_status = hclge_get_reset_status(hdev, queue_gid);
5170 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5171 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5175 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5177 dev_warn(&hdev->pdev->dev,
5178 "Deassert the soft reset fail, ret = %d\n", ret);
5183 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
5185 struct hnae3_handle *handle = &vport->nic;
5186 struct hclge_dev *hdev = vport->back;
5187 int reset_try_times = 0;
5192 if (queue_id >= handle->kinfo.num_tqps) {
5193 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
5198 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
5200 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5202 dev_warn(&hdev->pdev->dev,
5203 "Send reset tqp cmd fail, ret = %d\n", ret);
5207 reset_try_times = 0;
5208 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5209 /* Wait for tqp hw reset */
5211 reset_status = hclge_get_reset_status(hdev, queue_gid);
5216 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5217 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5221 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5223 dev_warn(&hdev->pdev->dev,
5224 "Deassert the soft reset fail, ret = %d\n", ret);
5227 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
5229 struct hclge_vport *vport = hclge_get_vport(handle);
5230 struct hclge_dev *hdev = vport->back;
5232 return hdev->fw_version;
5235 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
5238 struct hclge_vport *vport = hclge_get_vport(handle);
5239 struct hclge_dev *hdev = vport->back;
5240 struct phy_device *phydev = hdev->hw.mac.phydev;
5245 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
5246 (phydev->advertising & ADVERTISED_Asym_Pause);
5249 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5251 struct phy_device *phydev = hdev->hw.mac.phydev;
5256 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
5259 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
5262 phydev->advertising ^= ADVERTISED_Asym_Pause;
5265 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5270 hdev->fc_mode_last_time = HCLGE_FC_FULL;
5271 else if (rx_en && !tx_en)
5272 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
5273 else if (!rx_en && tx_en)
5274 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
5276 hdev->fc_mode_last_time = HCLGE_FC_NONE;
5278 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
5281 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
5283 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
5288 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
5293 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
5295 struct phy_device *phydev = hdev->hw.mac.phydev;
5296 u16 remote_advertising = 0;
5297 u16 local_advertising = 0;
5298 u32 rx_pause, tx_pause;
5301 if (!phydev->link || !phydev->autoneg)
5304 if (phydev->advertising & ADVERTISED_Pause)
5305 local_advertising = ADVERTISE_PAUSE_CAP;
5307 if (phydev->advertising & ADVERTISED_Asym_Pause)
5308 local_advertising |= ADVERTISE_PAUSE_ASYM;
5311 remote_advertising = LPA_PAUSE_CAP;
5313 if (phydev->asym_pause)
5314 remote_advertising |= LPA_PAUSE_ASYM;
5316 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
5317 remote_advertising);
5318 tx_pause = flowctl & FLOW_CTRL_TX;
5319 rx_pause = flowctl & FLOW_CTRL_RX;
5321 if (phydev->duplex == HCLGE_MAC_HALF) {
5326 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
5329 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
5330 u32 *rx_en, u32 *tx_en)
5332 struct hclge_vport *vport = hclge_get_vport(handle);
5333 struct hclge_dev *hdev = vport->back;
5335 *auto_neg = hclge_get_autoneg(handle);
5337 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
5343 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
5346 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
5349 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
5358 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
5359 u32 rx_en, u32 tx_en)
5361 struct hclge_vport *vport = hclge_get_vport(handle);
5362 struct hclge_dev *hdev = vport->back;
5363 struct phy_device *phydev = hdev->hw.mac.phydev;
5366 fc_autoneg = hclge_get_autoneg(handle);
5367 if (auto_neg != fc_autoneg) {
5368 dev_info(&hdev->pdev->dev,
5369 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
5373 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
5374 dev_info(&hdev->pdev->dev,
5375 "Priority flow control enabled. Cannot set link flow control.\n");
5379 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
5382 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
5384 /* Only support flow control negotiation for netdev with
5385 * phy attached for now.
5390 return phy_start_aneg(phydev);
5393 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
5394 u8 *auto_neg, u32 *speed, u8 *duplex)
5396 struct hclge_vport *vport = hclge_get_vport(handle);
5397 struct hclge_dev *hdev = vport->back;
5400 *speed = hdev->hw.mac.speed;
5402 *duplex = hdev->hw.mac.duplex;
5404 *auto_neg = hdev->hw.mac.autoneg;
5407 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
5409 struct hclge_vport *vport = hclge_get_vport(handle);
5410 struct hclge_dev *hdev = vport->back;
5413 *media_type = hdev->hw.mac.media_type;
5416 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
5417 u8 *tp_mdix_ctrl, u8 *tp_mdix)
5419 struct hclge_vport *vport = hclge_get_vport(handle);
5420 struct hclge_dev *hdev = vport->back;
5421 struct phy_device *phydev = hdev->hw.mac.phydev;
5422 int mdix_ctrl, mdix, retval, is_resolved;
5425 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
5426 *tp_mdix = ETH_TP_MDI_INVALID;
5430 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
5432 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
5433 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
5434 HCLGE_PHY_MDIX_CTRL_S);
5436 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
5437 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
5438 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
5440 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
5442 switch (mdix_ctrl) {
5444 *tp_mdix_ctrl = ETH_TP_MDI;
5447 *tp_mdix_ctrl = ETH_TP_MDI_X;
5450 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
5453 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
5458 *tp_mdix = ETH_TP_MDI_INVALID;
5460 *tp_mdix = ETH_TP_MDI_X;
5462 *tp_mdix = ETH_TP_MDI;
5465 static int hclge_init_instance_hw(struct hclge_dev *hdev)
5467 return hclge_mac_connect_phy(hdev);
5470 static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
5472 hclge_mac_disconnect_phy(hdev);
5475 static int hclge_init_client_instance(struct hnae3_client *client,
5476 struct hnae3_ae_dev *ae_dev)
5478 struct hclge_dev *hdev = ae_dev->priv;
5479 struct hclge_vport *vport;
5482 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
5483 vport = &hdev->vport[i];
5485 switch (client->type) {
5486 case HNAE3_CLIENT_KNIC:
5488 hdev->nic_client = client;
5489 vport->nic.client = client;
5490 ret = client->ops->init_instance(&vport->nic);
5494 ret = hclge_init_instance_hw(hdev);
5496 client->ops->uninit_instance(&vport->nic,
5501 hnae3_set_client_init_flag(client, ae_dev, 1);
5503 if (hdev->roce_client &&
5504 hnae3_dev_roce_supported(hdev)) {
5505 struct hnae3_client *rc = hdev->roce_client;
5507 ret = hclge_init_roce_base_info(vport);
5511 ret = rc->ops->init_instance(&vport->roce);
5515 hnae3_set_client_init_flag(hdev->roce_client,
5520 case HNAE3_CLIENT_UNIC:
5521 hdev->nic_client = client;
5522 vport->nic.client = client;
5524 ret = client->ops->init_instance(&vport->nic);
5528 hnae3_set_client_init_flag(client, ae_dev, 1);
5531 case HNAE3_CLIENT_ROCE:
5532 if (hnae3_dev_roce_supported(hdev)) {
5533 hdev->roce_client = client;
5534 vport->roce.client = client;
5537 if (hdev->roce_client && hdev->nic_client) {
5538 ret = hclge_init_roce_base_info(vport);
5542 ret = client->ops->init_instance(&vport->roce);
5546 hnae3_set_client_init_flag(client, ae_dev, 1);
5554 hdev->nic_client = NULL;
5555 vport->nic.client = NULL;
5558 hdev->roce_client = NULL;
5559 vport->roce.client = NULL;
5563 static void hclge_uninit_client_instance(struct hnae3_client *client,
5564 struct hnae3_ae_dev *ae_dev)
5566 struct hclge_dev *hdev = ae_dev->priv;
5567 struct hclge_vport *vport;
5570 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
5571 vport = &hdev->vport[i];
5572 if (hdev->roce_client) {
5573 hdev->roce_client->ops->uninit_instance(&vport->roce,
5575 hdev->roce_client = NULL;
5576 vport->roce.client = NULL;
5578 if (client->type == HNAE3_CLIENT_ROCE)
5580 if (hdev->nic_client && client->ops->uninit_instance) {
5581 hclge_uninit_instance_hw(hdev);
5582 client->ops->uninit_instance(&vport->nic, 0);
5583 hdev->nic_client = NULL;
5584 vport->nic.client = NULL;
5589 static int hclge_pci_init(struct hclge_dev *hdev)
5591 struct pci_dev *pdev = hdev->pdev;
5592 struct hclge_hw *hw;
5595 ret = pci_enable_device(pdev);
5597 dev_err(&pdev->dev, "failed to enable PCI device\n");
5601 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5603 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5606 "can't set consistent PCI DMA");
5607 goto err_disable_device;
5609 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
5612 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
5614 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
5615 goto err_disable_device;
5618 pci_set_master(pdev);
5620 hw->io_base = pcim_iomap(pdev, 2, 0);
5622 dev_err(&pdev->dev, "Can't map configuration register space\n");
5624 goto err_clr_master;
5627 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
5631 pci_clear_master(pdev);
5632 pci_release_regions(pdev);
5634 pci_disable_device(pdev);
5639 static void hclge_pci_uninit(struct hclge_dev *hdev)
5641 struct pci_dev *pdev = hdev->pdev;
5643 pcim_iounmap(pdev, hdev->hw.io_base);
5644 pci_free_irq_vectors(pdev);
5645 pci_clear_master(pdev);
5646 pci_release_mem_regions(pdev);
5647 pci_disable_device(pdev);
5650 static void hclge_state_init(struct hclge_dev *hdev)
5652 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
5653 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5654 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
5655 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5656 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
5657 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
5660 static void hclge_state_uninit(struct hclge_dev *hdev)
5662 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5664 if (hdev->service_timer.function)
5665 del_timer_sync(&hdev->service_timer);
5666 if (hdev->service_task.func)
5667 cancel_work_sync(&hdev->service_task);
5668 if (hdev->rst_service_task.func)
5669 cancel_work_sync(&hdev->rst_service_task);
5670 if (hdev->mbx_service_task.func)
5671 cancel_work_sync(&hdev->mbx_service_task);
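/* Editor's note on hclge_init_ae_dev() below: it brings up the whole PF
 * device in strict order: PCI resources, firmware command queue,
 * capability query, device configuration, MSI/MSI-X and the misc
 * vector, TQP/vport allocation and mapping, optional MDIO for copper
 * ports, then MAC, TSO, VLAN, TM scheduler, RSS and the manager table,
 * and finally the service timer/tasks and the misc interrupt enable.
 * Each error path unwinds exactly the steps completed so far.
 */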
5674 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5676 struct pci_dev *pdev = ae_dev->pdev;
5677 struct hclge_dev *hdev;
5680 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5687 hdev->ae_dev = ae_dev;
5688 hdev->reset_type = HNAE3_NONE_RESET;
5689 ae_dev->priv = hdev;
5691 ret = hclge_pci_init(hdev);
5693 dev_err(&pdev->dev, "PCI init failed\n");
5697 /* Initialize the firmware command queue */
5698 ret = hclge_cmd_queue_init(hdev);
5700 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
5701 goto err_pci_uninit;
5704 /* Initialize the firmware command layer */
5705 ret = hclge_cmd_init(hdev);
5707 goto err_cmd_uninit;
5709 ret = hclge_get_cap(hdev);
5711 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5713 goto err_cmd_uninit;
5716 ret = hclge_configure(hdev);
5718 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5719 goto err_cmd_uninit;
5722 ret = hclge_init_msi(hdev);
5724 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
5725 goto err_cmd_uninit;
5728 ret = hclge_misc_irq_init(hdev);
5731 "Misc IRQ(vector0) init error, ret = %d.\n",
5733 goto err_msi_uninit;
5736 ret = hclge_alloc_tqps(hdev);
5738 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
5739 goto err_msi_irq_uninit;
5742 ret = hclge_alloc_vport(hdev);
5744 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
5745 goto err_msi_irq_uninit;
5748 ret = hclge_map_tqp(hdev);
5750 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5751 goto err_msi_irq_uninit;
5754 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
5755 ret = hclge_mac_mdio_config(hdev);
5757 dev_err(&hdev->pdev->dev,
5758 "mdio config fail ret=%d\n", ret);
5759 goto err_msi_irq_uninit;
5763 ret = hclge_mac_init(hdev);
5765 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5766 goto err_mdiobus_unreg;
5769 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5771 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5772 goto err_mdiobus_unreg;
5775 ret = hclge_init_vlan_config(hdev);
5777 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5778 goto err_mdiobus_unreg;
5781 ret = hclge_tm_schd_init(hdev);
5783 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
5784 goto err_mdiobus_unreg;
5787 hclge_rss_init_cfg(hdev);
5788 ret = hclge_rss_init_hw(hdev);
5790 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5791 goto err_mdiobus_unreg;
5794 ret = init_mgr_tbl(hdev);
5796 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
5797 goto err_mdiobus_unreg;
5800 hclge_dcb_ops_set(hdev);
5802 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
5803 INIT_WORK(&hdev->service_task, hclge_service_task);
5804 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
5805 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
5807 hclge_clear_all_event_cause(hdev);
5809 /* Enable MISC vector(vector0) */
5810 hclge_enable_vector(&hdev->misc_vector, true);
5812 hclge_state_init(hdev);
5814 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
5818 if (hdev->hw.mac.phydev)
5819 mdiobus_unregister(hdev->hw.mac.mdio_bus);
5821 hclge_misc_irq_uninit(hdev);
5823 pci_free_irq_vectors(pdev);
5825 hclge_destroy_cmd_queue(&hdev->hw);
5827 pcim_iounmap(pdev, hdev->hw.io_base);
5828 pci_clear_master(pdev);
5829 pci_release_regions(pdev);
5830 pci_disable_device(pdev);
5835 static void hclge_stats_clear(struct hclge_dev *hdev)
5837 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
5840 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
5842 struct hclge_dev *hdev = ae_dev->priv;
5843 struct pci_dev *pdev = ae_dev->pdev;
5846 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5848 hclge_stats_clear(hdev);
5849 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
5851 ret = hclge_cmd_init(hdev);
5853 dev_err(&pdev->dev, "Cmd queue init failed\n");
5857 ret = hclge_get_cap(hdev);
5859 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5864 ret = hclge_configure(hdev);
5866 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5870 ret = hclge_map_tqp(hdev);
5872 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5876 ret = hclge_mac_init(hdev);
5878 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5882 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5884 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5888 ret = hclge_init_vlan_config(hdev);
5890 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5894 ret = hclge_tm_init_hw(hdev);
5896 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
5900 ret = hclge_rss_init_hw(hdev);
5902 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5906 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
5912 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
5914 struct hclge_dev *hdev = ae_dev->priv;
5915 struct hclge_mac *mac = &hdev->hw.mac;
5917 hclge_state_uninit(hdev);
5920 mdiobus_unregister(mac->mdio_bus);
5922 /* Disable MISC vector(vector0) */
5923 hclge_enable_vector(&hdev->misc_vector, false);
5924 synchronize_irq(hdev->misc_vector.vector_irq);
5926 hclge_destroy_cmd_queue(&hdev->hw);
5927 hclge_misc_irq_uninit(hdev);
5928 hclge_pci_uninit(hdev);
5929 ae_dev->priv = NULL;
5932 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
5934 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5935 struct hclge_vport *vport = hclge_get_vport(handle);
5936 struct hclge_dev *hdev = vport->back;
5938 return min_t(u32, hdev->rss_size_max,
5939 vport->alloc_tqps / kinfo->num_tc);
5942 static void hclge_get_channels(struct hnae3_handle *handle,
5943 struct ethtool_channels *ch)
5945 ch->max_combined = hclge_get_max_channels(handle);
5946 ch->other_count = 1;
5948 ch->combined_count = handle->kinfo.rss_size;
5951 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5952 u16 *free_tqps, u16 *max_rss_size)
5954 struct hclge_vport *vport = hclge_get_vport(handle);
5955 struct hclge_dev *hdev = vport->back;
5959 for (i = 0; i < hdev->num_tqps; i++) {
5960 if (!hdev->htqp[i].alloced)
5963 *free_tqps = temp_tqps;
5964 *max_rss_size = hdev->rss_size_max;
5967 static void hclge_release_tqp(struct hclge_vport *vport)
5969 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5970 struct hclge_dev *hdev = vport->back;
5973 for (i = 0; i < kinfo->num_tqps; i++) {
5974 struct hclge_tqp *tqp =
5975 container_of(kinfo->tqp[i], struct hclge_tqp, q);
5977 tqp->q.handle = NULL;
5978 tqp->q.tqp_index = 0;
5979 tqp->alloced = false;
5982 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
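/* Editor's note on hclge_set_channels() below: it changes the number of
 * combined channels at runtime by releasing the old TQPs, redoing the
 * knic setup and vport mapping with the new count, reinitializing the
 * TM scheduler, recomputing the per-TC RSS mode from the rss_size
 * (rounded up to a power of two), and rebuilding the RSS indirection
 * table so it only references the new queues.
 */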
5986 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
5988 struct hclge_vport *vport = hclge_get_vport(handle);
5989 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5990 struct hclge_dev *hdev = vport->back;
5991 int cur_rss_size = kinfo->rss_size;
5992 int cur_tqps = kinfo->num_tqps;
5993 u16 tc_offset[HCLGE_MAX_TC_NUM];
5994 u16 tc_valid[HCLGE_MAX_TC_NUM];
5995 u16 tc_size[HCLGE_MAX_TC_NUM];
6000 /* Free the old tqps, then reallocate with the new tqp number during nic setup */
6001 hclge_release_tqp(vport);
6003 ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
6005 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
6009 ret = hclge_map_tqp_to_vport(hdev, vport);
6011 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
6015 ret = hclge_tm_schd_init(hdev);
6017 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
6021 roundup_size = roundup_pow_of_two(kinfo->rss_size);
6022 roundup_size = ilog2(roundup_size);
6023 /* Set the RSS TC mode according to the new RSS size */
6024 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
6027 if (!(hdev->hw_tc_map & BIT(i)))
6031 tc_size[i] = roundup_size;
6032 tc_offset[i] = kinfo->rss_size * i;
6034 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
6038 /* Reinitialize the rss indirection table according to the new RSS size */
6039 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
6043 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
6044 rss_indir[i] = i % kinfo->rss_size;
6046 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
6048 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
6054 dev_info(&hdev->pdev->dev,
6055 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
6056 cur_rss_size, kinfo->rss_size,
6057 cur_tqps, kinfo->rss_size * kinfo->num_tc);
6062 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
6063 u32 *regs_num_64_bit)
6065 struct hclge_desc desc;
6069 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
6070 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6072 dev_err(&hdev->pdev->dev,
6073 "Query register number cmd failed, ret = %d.\n", ret);
6077 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
6078 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
6080 total_num = *regs_num_32_bit + *regs_num_64_bit;
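/* Editor's note on hclge_get_32_bit_regs() below: it reads the 32-bit
 * register dump through a chain of descriptors. Only the first
 * descriptor keeps its two 32-bit words of command header; continuation
 * descriptors appear to carry payload across their full 32 bytes,
 * header area included, which is why cmd_num is computed over
 * regs_num + 2. hclge_get_64_bit_regs() further below mirrors this
 * layout with 64-bit words and a one-word header reservation.
 */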
6087 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
6090 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
6092 struct hclge_desc *desc;
6093 u32 *reg_val = data;
6102 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
6103 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
6107 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
6108 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
6110 dev_err(&hdev->pdev->dev,
6111 "Query 32 bit register cmd failed, ret = %d.\n", ret);
6116 for (i = 0; i < cmd_num; i++) {
6118 desc_data = (__le32 *)(&desc[i].data[0]);
6119 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
6121 desc_data = (__le32 *)(&desc[i]);
6122 n = HCLGE_32_BIT_REG_RTN_DATANUM;
6124 for (k = 0; k < n; k++) {
6125 *reg_val++ = le32_to_cpu(*desc_data++);
6137 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
6140 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
6142 struct hclge_desc *desc;
6143 u64 *reg_val = data;
6152 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
6153 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
6157 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
6158 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
6160 dev_err(&hdev->pdev->dev,
6161 "Query 64 bit register cmd failed, ret = %d.\n", ret);
6166 for (i = 0; i < cmd_num; i++) {
6168 desc_data = (__le64 *)(&desc[i].data[0]);
6169 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
6171 desc_data = (__le64 *)(&desc[i]);
6172 n = HCLGE_64_BIT_REG_RTN_DATANUM;
6174 for (k = 0; k < n; k++) {
6175 *reg_val++ = le64_to_cpu(*desc_data++);
6187 static int hclge_get_regs_len(struct hnae3_handle *handle)
6189 struct hclge_vport *vport = hclge_get_vport(handle);
6190 struct hclge_dev *hdev = vport->back;
6191 u32 regs_num_32_bit, regs_num_64_bit;
6194 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
6196 dev_err(&hdev->pdev->dev,
6197 "Get register number failed, ret = %d.\n", ret);
6201 return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
6204 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
6207 struct hclge_vport *vport = hclge_get_vport(handle);
6208 struct hclge_dev *hdev = vport->back;
6209 u32 regs_num_32_bit, regs_num_64_bit;
6212 *version = hdev->fw_version;
6214 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
6216 dev_err(&hdev->pdev->dev,
6217 "Get register number failed, ret = %d.\n", ret);
6221 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
6223 dev_err(&hdev->pdev->dev,
6224 "Get 32 bit register failed, ret = %d.\n", ret);
6228 data = (u32 *)data + regs_num_32_bit;
6229 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
6232 dev_err(&hdev->pdev->dev,
6233 "Get 64 bit register failed, ret = %d.\n", ret);
6236 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
6238 struct hclge_set_led_state_cmd *req;
6239 struct hclge_desc desc;
6242 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
6244 req = (struct hclge_set_led_state_cmd *)desc.data;
6245 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
6246 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
6248 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6250 dev_err(&hdev->pdev->dev,
6251 "Send set led state cmd error, ret =%d\n", ret);
6256 enum hclge_led_status {
6259 HCLGE_LED_NO_CHANGE = 0xFF,
6262 static int hclge_set_led_id(struct hnae3_handle *handle,
6263 enum ethtool_phys_id_state status)
6265 struct hclge_vport *vport = hclge_get_vport(handle);
6266 struct hclge_dev *hdev = vport->back;
6269 case ETHTOOL_ID_ACTIVE:
6270 return hclge_set_led_status(hdev, HCLGE_LED_ON);
6271 case ETHTOOL_ID_INACTIVE:
6272 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
6278 static void hclge_get_link_mode(struct hnae3_handle *handle,
6279 unsigned long *supported,
6280 unsigned long *advertising)
6282 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
6283 struct hclge_vport *vport = hclge_get_vport(handle);
6284 struct hclge_dev *hdev = vport->back;
6285 unsigned int idx = 0;
6287 for (; idx < size; idx++) {
6288 supported[idx] = hdev->hw.mac.supported[idx];
6289 advertising[idx] = hdev->hw.mac.advertising[idx];
6293 static void hclge_get_port_type(struct hnae3_handle *handle,
6296 struct hclge_vport *vport = hclge_get_vport(handle);
6297 struct hclge_dev *hdev = vport->back;
6298 u8 media_type = hdev->hw.mac.media_type;
6300 switch (media_type) {
6301 case HNAE3_MEDIA_TYPE_FIBER:
6302 *port_type = PORT_FIBRE;
6304 case HNAE3_MEDIA_TYPE_COPPER:
6305 *port_type = PORT_TP;
6307 case HNAE3_MEDIA_TYPE_UNKNOWN:
6309 *port_type = PORT_OTHER;
6314 static const struct hnae3_ae_ops hclge_ops = {
6315 .init_ae_dev = hclge_init_ae_dev,
6316 .uninit_ae_dev = hclge_uninit_ae_dev,
6317 .init_client_instance = hclge_init_client_instance,
6318 .uninit_client_instance = hclge_uninit_client_instance,
6319 .map_ring_to_vector = hclge_map_ring_to_vector,
6320 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
6321 .get_vector = hclge_get_vector,
6322 .put_vector = hclge_put_vector,
6323 .set_promisc_mode = hclge_set_promisc_mode,
6324 .set_loopback = hclge_set_loopback,
6325 .start = hclge_ae_start,
6326 .stop = hclge_ae_stop,
6327 .get_status = hclge_get_status,
6328 .get_ksettings_an_result = hclge_get_ksettings_an_result,
6329 .update_speed_duplex_h = hclge_update_speed_duplex_h,
6330 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
6331 .get_media_type = hclge_get_media_type,
6332 .get_rss_key_size = hclge_get_rss_key_size,
6333 .get_rss_indir_size = hclge_get_rss_indir_size,
6334 .get_rss = hclge_get_rss,
6335 .set_rss = hclge_set_rss,
6336 .set_rss_tuple = hclge_set_rss_tuple,
6337 .get_rss_tuple = hclge_get_rss_tuple,
6338 .get_tc_size = hclge_get_tc_size,
6339 .get_mac_addr = hclge_get_mac_addr,
6340 .set_mac_addr = hclge_set_mac_addr,
6341 .add_uc_addr = hclge_add_uc_addr,
6342 .rm_uc_addr = hclge_rm_uc_addr,
6343 .add_mc_addr = hclge_add_mc_addr,
6344 .rm_mc_addr = hclge_rm_mc_addr,
6345 .update_mta_status = hclge_update_mta_status,
6346 .set_autoneg = hclge_set_autoneg,
6347 .get_autoneg = hclge_get_autoneg,
6348 .get_pauseparam = hclge_get_pauseparam,
6349 .set_pauseparam = hclge_set_pauseparam,
6350 .set_mtu = hclge_set_mtu,
6351 .reset_queue = hclge_reset_tqp,
6352 .get_stats = hclge_get_stats,
6353 .update_stats = hclge_update_stats,
6354 .get_strings = hclge_get_strings,
6355 .get_sset_count = hclge_get_sset_count,
6356 .get_fw_version = hclge_get_fw_version,
6357 .get_mdix_mode = hclge_get_mdix_mode,
6358 .enable_vlan_filter = hclge_enable_vlan_filter,
6359 .set_vlan_filter = hclge_set_vlan_filter,
6360 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
6361 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
6362 .reset_event = hclge_reset_event,
6363 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
6364 .set_channels = hclge_set_channels,
6365 .get_channels = hclge_get_channels,
6366 .get_flowctrl_adv = hclge_get_flowctrl_adv,
6367 .get_regs_len = hclge_get_regs_len,
6368 .get_regs = hclge_get_regs,
6369 .set_led_id = hclge_set_led_id,
6370 .get_link_mode = hclge_get_link_mode,
6371 .get_port_type = hclge_get_port_type,
6374 static struct hnae3_ae_algo ae_algo = {
6376 .pdev_id_table = ae_algo_pci_tbl,
6379 static int hclge_init(void)
6381 pr_info("%s is initializing\n", HCLGE_NAME);
6383 hnae3_register_ae_algo(&ae_algo);
6388 static void hclge_exit(void)
6390 hnae3_unregister_ae_algo_prepare(&ae_algo);
6391 hnae3_unregister_ae_algo(&ae_algo);
6393 module_init(hclge_init);
6394 module_exit(hclge_exit);
6396 MODULE_LICENSE("GPL");
6397 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
6398 MODULE_DESCRIPTION("HCLGE Driver");
6399 MODULE_VERSION(HCLGE_MOD_VERSION);