/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"

/* Support for stats. Returns netdev, driver, and device stats */
enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS };
struct _nfp_net_et_stats {
	char name[ETH_GSTRING_LEN];
	int type;
	int sz;
	int off;
};

#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS,			\
		FIELD_SIZEOF(struct net_device_stats, m),	\
		offsetof(struct net_device_stats, m)
/* For stats in the control BAR (other than Q stats) */
#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS,			\
		sizeof(u64),					\
		(m)

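/* Illustration: with the macros above,
 *	{"rx_packets", NN_ET_NETDEV_STAT(rx_packets)}
 * expands to the type NETDEV_ET_STATS plus the size and offset of
 * net_device_stats::rx_packets, while NN_ET_DEV_STAT() entries record
 * a 64-bit counter offset into the control BAR.
 */
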
static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
	/* netdev stats */
	{"rx_packets", NN_ET_NETDEV_STAT(rx_packets)},
	{"tx_packets", NN_ET_NETDEV_STAT(tx_packets)},
	{"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)},
	{"rx_errors", NN_ET_NETDEV_STAT(rx_errors)},
	{"tx_errors", NN_ET_NETDEV_STAT(tx_errors)},
	{"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)},
	{"multicast", NN_ET_NETDEV_STAT(multicast)},
	{"collisions", NN_ET_NETDEV_STAT(collisions)},
	{"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)},
	{"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)},
	/* Stats from the device */
	{"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)},
	{"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)},
	{"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)},
	{"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)},
	{"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)},
	{"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)},
	{"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)},
	{"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)},
	{"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)},

	{"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)},
	{"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)},
	{"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)},
	{"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)},
	{"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)},
	{"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)},
	{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
	{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
	{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},

	{"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
	{"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
	{"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
	{"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
	{"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
	{"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
	{"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};

#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
			 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)

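/* Worked example (ring/vector counts are illustrative, not fixed):
 * the table above has 44 entries, so with 8 r-vectors, 8 TX rings and
 * 8 RX rings the full stat set is
 *	44 + 8 * 3 + 7 + (8 + 8) * 2 = 107
 * entries.  NN_ET_STATS_LEN must match exactly what
 * nfp_net_get_strings() and nfp_net_get_stats() emit, in the same
 * order.
 */
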
static void nfp_net_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct nfp_net *nn = netdev_priv(netdev);

	strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, nfp_net_driver_version,
		sizeof(drvinfo->version));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d.%d",
		 nn->fw_ver.resv, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_stats = NN_ET_STATS_LEN;
	drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);

	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
	ring->rx_pending = nn->rxd_cnt;
	ring->tx_pending = nn->txd_cnt;
}

static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

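/* Example (hypothetical values): "ethtool -G <ifname> rx 1500 tx 1500"
 * is rounded up to the next power of two, i.e. 2048 descriptors per
 * ring, before being checked against the MIN/MAX bounds above.
 */
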
static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
			memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_r_vecs; i++) {
			sprintf(p, "rvec_%u_rx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_busy", i);
			p += ETH_GSTRING_LEN;
		}
		strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_gather", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_lso", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		for (i = 0; i < nn->num_tx_rings; i++) {
			sprintf(p, "txq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "txq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_rx_rings; i++) {
			sprintf(p, "rxq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rxq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

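/* The strings above are the labels shown by "ethtool -S <ifname>".
 * nfp_net_get_stats() below must fill data[] in exactly the same
 * order: global stats, per-r-vector stats, gathered stats, then
 * per-queue stats.
 */
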
static void nfp_net_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	struct rtnl_link_stats64 *netdev_stats;
	struct rtnl_link_stats64 temp = {};
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	u8 __iomem *io_p;
	int i, j, k;
	u8 *p;

	netdev_stats = dev_get_stats(netdev, &temp);

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
		switch (nfp_net_et_stats[i].type) {
		case NETDEV_ET_STATS:
			p = (char *)netdev_stats + nfp_net_et_stats[i].off;
			data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
				*(u64 *)p : *(u32 *)p;
			break;

		case NFP_NET_DEV_ET_STATS:
			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
			data[i] = readq(io_p);
			break;
		}
	}
	for (j = 0; j < nn->num_r_vecs; j++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
			data[i++] = nn->r_vecs[j].rx_pkts;
			tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
			data[i++] = nn->r_vecs[j].tx_pkts;
			data[i++] = nn->r_vecs[j].tx_busy;
			tmp[3] = nn->r_vecs[j].hw_csum_tx;
			tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[j].tx_gather;
			tmp[6] = nn->r_vecs[j].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));

		for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
			gathered_stats[k] += tmp[k];
	}
	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		data[i++] = gathered_stats[j];
	for (j = 0; j < nn->num_tx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
	for (j = 0; j < nn->num_rx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
}

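/* Note: the u64_stats_fetch_begin()/retry() loops above re-read each
 * r-vector's counters until an untorn snapshot is obtained.  Per-queue
 * counters are laid out in the control BAR as two adjacent 64-bit
 * words, packet count at offset 0 and byte count at offset 8, hence
 * the paired readq() calls.
 */
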
static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return NN_ET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}

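/* For example, ethtool_flow_to_nfp_flag(TCP_V4_FLOW) returns
 * NFP_NET_CFG_RSS_IPV4_TCP; any flow type outside the table returns 0,
 * which callers treat as "not supported".
 */
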
static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->num_rx_rings;
		return 0;
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

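/* Example (hypothetical interface name):
 *	ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * requests hashing on IP src/dst plus both L4 port halves for TCP/IPv4
 * and reaches this handler with nfc->data set to
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3.
 */
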
static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	return NFP_NET_CFG_RSS_KEY_SZ;
}

static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	if (indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			indir[i] = nn->rss_itbl[i];
	if (key)
		memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
	    !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key) {
		memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
		nfp_net_rss_write_key(nn);
	}
	if (indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

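/* "ethtool -x <ifname>" reads the indirection table and hash key via
 * nfp_net_get_rxfh() above; "ethtool -X <ifname> hkey <key>" (for
 * example) updates them via nfp_net_set_rxfh() and triggers an
 * NFP_NET_CFG_UPDATE_RSS reconfig.
 */
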
/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}

static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	if (ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_rx_coalesce ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -ENOTSUPP;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each
	 * timestamp count.
	 */
	factor = nn->me_freq_mhz / 16;
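	/* For example, assuming an ME frequency of 1200 MHz (illustrative
	 * value only): factor = 1200 / 16 = 75, so a 50 usec interval
	 * becomes 50 * 75 = 3750 timestamp ticks, well within the 16-bit
	 * limits checked below.
	 */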

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this
	 * would cause interrupts to never be generated.  To disable
	 * coalescing, set usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	/* ensure valid configuration */
	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
		return -EINVAL;

	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
		return -EINVAL;

	if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	/* configuration is valid */
	nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
	nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

	/* write configuration to device */
	nfp_net_coalesce_write_cfg(nn);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}

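/* Example (hypothetical values):
 *	ethtool -C <ifname> rx-usecs 50 rx-frames 64
 * coalesces RX interrupts until 50 usecs have elapsed since the first
 * completion or 64 frames have completed, whichever comes first.
 */
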
static const struct ethtool_ops nfp_net_ethtool_ops = {
	.get_drvinfo		= nfp_net_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.get_coalesce		= nfp_net_get_coalesce,
	.set_coalesce		= nfp_net_set_coalesce,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}