1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/etherdevice.h>
5 #include <linux/of_net.h>
11 #include "i40e_diag.h"
12 #include <net/udp_tunnel.h>
13 /* All i40e tracepoints are defined by the include below, which
14 * must be included exactly once across the whole kernel with
15 * CREATE_TRACE_POINTS defined
17 #define CREATE_TRACE_POINTS
18 #include "i40e_trace.h"
20 const char i40e_driver_name[] = "i40e";
21 static const char i40e_driver_string[] =
22 "Intel(R) Ethernet Connection XL710 Network Driver";
26 #define DRV_VERSION_MAJOR 2
27 #define DRV_VERSION_MINOR 3
28 #define DRV_VERSION_BUILD 2
29 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
30 __stringify(DRV_VERSION_MINOR) "." \
31 __stringify(DRV_VERSION_BUILD) DRV_KERN
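/* With the values above, DRV_VERSION expands to the string "2.3.2" followed
 * by whatever suffix DRV_KERN (defined elsewhere) appends.
 */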
32 const char i40e_driver_version_str[] = DRV_VERSION;
33 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
35 /* forward declarations */
36 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
37 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
38 static int i40e_add_vsi(struct i40e_vsi *vsi);
39 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
40 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
41 static int i40e_setup_misc_vector(struct i40e_pf *pf);
42 static void i40e_determine_queue_usage(struct i40e_pf *pf);
43 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
44 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
45 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
47 static int i40e_reset(struct i40e_pf *pf);
48 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
49 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
50 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
51 static int i40e_get_capabilities(struct i40e_pf *pf,
52 enum i40e_admin_queue_opc list_type);
55 /* i40e_pci_tbl - PCI Device ID Table
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
60 * Class, Class Mask, private data (not used) }
62 static const struct pci_device_id i40e_pci_tbl[] = {
63 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
64 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
65 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
66 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
82 /* required last entry */
85 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
87 #define I40E_MAX_VF_COUNT 128
88 static int debug = -1;
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
92 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
93 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(DRV_VERSION);
97 static struct workqueue_struct *i40e_wq;
99 static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
100 struct net_device *netdev, int delta)
102 struct netdev_hw_addr *ha;
107 netdev_for_each_mc_addr(ha, netdev) {
108 if (ether_addr_equal(ha->addr, f->macaddr)) {
109 ha->refcount += delta;
110 if (ha->refcount <= 0)
118 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
119 * @hw: pointer to the HW structure
120 * @mem: ptr to mem struct to fill out
121 * @size: size of memory requested
122 * @alignment: what to align the allocation to
124 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
125 u64 size, u32 alignment)
127 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
129 mem->size = ALIGN(size, alignment);
130 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
131 &mem->pa, GFP_KERNEL);
139 * i40e_free_dma_mem_d - OS specific memory free for shared code
140 * @hw: pointer to the HW structure
141 * @mem: ptr to mem struct to free
143 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
145 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
147 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
156 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
157 * @hw: pointer to the HW structure
158 * @mem: ptr to mem struct to fill out
159 * @size: size of memory requested
161 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
165 mem->va = kzalloc(size, GFP_KERNEL);
174 * i40e_free_virt_mem_d - OS specific memory free for shared code
175 * @hw: pointer to the HW structure
176 * @mem: ptr to mem struct to free
178 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
180 /* it's ok to kfree a NULL pointer */
189 * i40e_get_lump - find a lump of free generic resource
190 * @pf: board private structure
191 * @pile: the pile of resource to search
192 * @needed: the number of items needed
193 * @id: an owner id to stick on the items assigned
195 * Returns the base item index of the lump, or negative for error
197 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
203 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
204 dev_info(&pf->pdev->dev,
205 "param err: pile=%s needed=%d id=0x%04x\n",
206 pile ? "<valid>" : "<null>", needed, id);
210 /* Allocate last queue in the pile for FDIR VSI queue
211 * so it doesn't fragment the qp_pile
213 if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
214 if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
215 dev_err(&pf->pdev->dev,
216 "Cannot allocate queue %d for I40E_VSI_FDIR\n",
217 pile->num_entries - 1);
220 pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
221 return pile->num_entries - 1;
225 while (i < pile->num_entries) {
226 /* skip already allocated entries */
227 if (pile->list[i] & I40E_PILE_VALID_BIT) {
232 /* do we have enough in this lump? */
233 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
234 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
239 /* there was enough, so assign it to the requestor */
240 for (j = 0; j < needed; j++)
241 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
246 /* not enough, so skip over it and continue looking */
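/* The pile is a simple array-based allocator: each pile->list entry stores
 * the owner id with I40E_PILE_VALID_BIT set once the slot is taken.  A
 * sketch of typical usage (assuming a caller that owns a VSI index):
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, 4, vsi->idx);
 *	...
 *	i40e_put_lump(pf->qp_pile, base, vsi->idx);
 *
 * i40e_get_lump() returns the first index of a contiguous free run of the
 * requested size, and i40e_put_lump() walks forward from that index
 * clearing entries that still carry the matching owner id.
 */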
254 * i40e_put_lump - return a lump of generic resource
255 * @pile: the pile of resource to search
256 * @index: the base item index
257 * @id: the owner id of the items assigned
259 * Returns the count of items in the lump
261 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
263 int valid_id = (id | I40E_PILE_VALID_BIT);
267 if (!pile || index >= pile->num_entries)
270 for (i = index;
271 i < pile->num_entries && pile->list[i] == valid_id;
272 i++) {
282 * i40e_find_vsi_from_id - searches for the vsi with the given id
283 * @pf: the pf structure to search for the vsi
284 * @id: id of the vsi it is searching for
286 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
290 for (i = 0; i < pf->num_alloc_vsi; i++)
291 if (pf->vsi[i] && (pf->vsi[i]->id == id))
298 * i40e_service_event_schedule - Schedule the service task to wake up
299 * @pf: board private structure
301 * If not already scheduled, this puts the task into the work queue
303 void i40e_service_event_schedule(struct i40e_pf *pf)
305 if (!test_bit(__I40E_DOWN, pf->state) &&
306 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
307 queue_work(i40e_wq, &pf->service_task);
311 * i40e_tx_timeout - Respond to a Tx Hang
312 * @netdev: network interface device structure
314 * If any port has noticed a Tx timeout, it is likely that the whole
315 * device is munged, not just the one netdev port, so go for the full
316 * reset.
318 static void i40e_tx_timeout(struct net_device *netdev)
320 struct i40e_netdev_priv *np = netdev_priv(netdev);
321 struct i40e_vsi *vsi = np->vsi;
322 struct i40e_pf *pf = vsi->back;
323 struct i40e_ring *tx_ring = NULL;
324 unsigned int i, hung_queue = 0;
327 pf->tx_timeout_count++;
329 /* find the stopped queue the same way the stack does */
330 for (i = 0; i < netdev->num_tx_queues; i++) {
331 struct netdev_queue *q;
332 unsigned long trans_start;
334 q = netdev_get_tx_queue(netdev, i);
335 trans_start = q->trans_start;
336 if (netif_xmit_stopped(q) &&
337 time_after(jiffies,
338 (trans_start + netdev->watchdog_timeo))) {
344 if (i == netdev->num_tx_queues) {
345 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
347 /* now that we have an index, find the tx_ring struct */
348 for (i = 0; i < vsi->num_queue_pairs; i++) {
349 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
350 if (hung_queue ==
351 vsi->tx_rings[i]->queue_index) {
352 tx_ring = vsi->tx_rings[i];
359 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
360 pf->tx_timeout_recovery_level = 1; /* reset after some time */
361 else if (time_before(jiffies,
362 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
363 return; /* don't do any new action before the next timeout */
365 /* don't kick off another recovery if one is already pending */
366 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
370 head = i40e_get_head(tx_ring);
371 /* Read interrupt register */
372 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
373 val = rd32(&pf->hw,
374 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
375 tx_ring->vsi->base_vector - 1));
376 else
377 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
379 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
380 vsi->seid, hung_queue, tx_ring->next_to_clean,
381 head, tx_ring->next_to_use,
382 readl(tx_ring->tail), val);
385 pf->tx_timeout_last_recovery = jiffies;
386 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
387 pf->tx_timeout_recovery_level, hung_queue);
389 switch (pf->tx_timeout_recovery_level) {
391 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
394 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
397 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
400 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
401 set_bit(__I40E_DOWN_REQUESTED, pf->state);
402 set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
406 i40e_service_event_schedule(pf);
407 pf->tx_timeout_recovery_level++;
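/* Recovery escalates one level per hang: a PF reset first, then a CORE
 * reset, then a GLOBAL reset, and finally the device is requested down as
 * non-recoverable.  The level drops back to 1 once 20 seconds pass without
 * another timeout (see the time_after() check above).
 */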
411 * i40e_get_vsi_stats_struct - Get System Network Statistics
412 * @vsi: the VSI we care about
414 * Returns the address of the device statistics structure.
415 * The statistics are actually updated from the service task.
417 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
419 return &vsi->net_stats;
423 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
424 * @ring: Tx ring to get statistics from
425 * @stats: statistics entry to be updated
427 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
428 struct rtnl_link_stats64 *stats)
434 start = u64_stats_fetch_begin_irq(&ring->syncp);
435 packets = ring->stats.packets;
436 bytes = ring->stats.bytes;
437 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
439 stats->tx_packets += packets;
440 stats->tx_bytes += bytes;
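/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair is the
 * usual seqcount pattern for sampling 64-bit counters that the datapath
 * updates locklessly: the loop simply re-reads packets/bytes whenever the
 * writer touched ring->syncp in between, so the two values stay consistent
 * with each other.
 */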
444 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
445 * @netdev: network interface device structure
446 * @stats: data structure to store statistics
448 * Returns the address of the device statistics structure.
449 * The statistics are actually updated from the service task.
451 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
452 struct rtnl_link_stats64 *stats)
454 struct i40e_netdev_priv *np = netdev_priv(netdev);
455 struct i40e_vsi *vsi = np->vsi;
456 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
457 struct i40e_ring *ring;
460 if (test_bit(__I40E_VSI_DOWN, vsi->state))
467 for (i = 0; i < vsi->num_queue_pairs; i++) {
471 ring = READ_ONCE(vsi->tx_rings[i]);
474 i40e_get_netdev_stats_struct_tx(ring, stats);
476 if (i40e_enabled_xdp_vsi(vsi)) {
477 ring = READ_ONCE(vsi->xdp_rings[i]);
480 i40e_get_netdev_stats_struct_tx(ring, stats);
483 ring = READ_ONCE(vsi->rx_rings[i]);
487 start = u64_stats_fetch_begin_irq(&ring->syncp);
488 packets = ring->stats.packets;
489 bytes = ring->stats.bytes;
490 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
492 stats->rx_packets += packets;
493 stats->rx_bytes += bytes;
498 /* following stats updated by i40e_watchdog_subtask() */
499 stats->multicast = vsi_stats->multicast;
500 stats->tx_errors = vsi_stats->tx_errors;
501 stats->tx_dropped = vsi_stats->tx_dropped;
502 stats->rx_errors = vsi_stats->rx_errors;
503 stats->rx_dropped = vsi_stats->rx_dropped;
504 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
505 stats->rx_length_errors = vsi_stats->rx_length_errors;
509 * i40e_vsi_reset_stats - Resets all stats of the given vsi
510 * @vsi: the VSI to have its stats reset
512 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
514 struct rtnl_link_stats64 *ns;
520 ns = i40e_get_vsi_stats_struct(vsi);
521 memset(ns, 0, sizeof(*ns));
522 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
523 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
524 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
525 if (vsi->rx_rings && vsi->rx_rings[0]) {
526 for (i = 0; i < vsi->num_queue_pairs; i++) {
527 memset(&vsi->rx_rings[i]->stats, 0,
528 sizeof(vsi->rx_rings[i]->stats));
529 memset(&vsi->rx_rings[i]->rx_stats, 0,
530 sizeof(vsi->rx_rings[i]->rx_stats));
531 memset(&vsi->tx_rings[i]->stats, 0,
532 sizeof(vsi->tx_rings[i]->stats));
533 memset(&vsi->tx_rings[i]->tx_stats, 0,
534 sizeof(vsi->tx_rings[i]->tx_stats));
537 vsi->stat_offsets_loaded = false;
541 * i40e_pf_reset_stats - Reset all of the stats for the given PF
542 * @pf: the PF to be reset
544 void i40e_pf_reset_stats(struct i40e_pf *pf)
548 memset(&pf->stats, 0, sizeof(pf->stats));
549 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
550 pf->stat_offsets_loaded = false;
552 for (i = 0; i < I40E_MAX_VEB; i++) {
554 memset(&pf->veb[i]->stats, 0,
555 sizeof(pf->veb[i]->stats));
556 memset(&pf->veb[i]->stats_offsets, 0,
557 sizeof(pf->veb[i]->stats_offsets));
558 pf->veb[i]->stat_offsets_loaded = false;
561 pf->hw_csum_rx_error = 0;
565 * i40e_stat_update48 - read and update a 48 bit stat from the chip
566 * @hw: ptr to the hardware info
567 * @hireg: the high 32 bit reg to read
568 * @loreg: the low 32 bit reg to read
569 * @offset_loaded: has the initial offset been loaded yet
570 * @offset: ptr to current offset value
571 * @stat: ptr to the stat
573 * Since the device stats are not reset at PFReset, they likely will not
574 * be zeroed when the driver starts. We'll save the first values read
575 * and use them as offsets to be subtracted from the raw values in order
576 * to report stats that count from zero. In the process, we also manage
577 * the potential roll-over.
579 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
580 bool offset_loaded, u64 *offset, u64 *stat)
584 if (hw->device_id == I40E_DEV_ID_QEMU) {
585 new_data = rd32(hw, loreg);
586 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
587 } else {
588 new_data = rd64(hw, loreg);
592 if (likely(new_data >= *offset))
593 *stat = new_data - *offset;
594 else
595 *stat = (new_data + BIT_ULL(48)) - *offset;
596 *stat &= 0xFFFFFFFFFFFFULL;
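/* Rollover example: with a saved offset of 0xFFFFFFFFFFF0 and a 48-bit
 * counter that has wrapped to 0x10, (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0
 * masked to 48 bits yields 0x20, i.e. 32 units counted since the offset
 * was recorded.
 */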
600 * i40e_stat_update32 - read and update a 32 bit stat from the chip
601 * @hw: ptr to the hardware info
602 * @reg: the hw reg to read
603 * @offset_loaded: has the initial offset been loaded yet
604 * @offset: ptr to current offset value
605 * @stat: ptr to the stat
607 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
608 bool offset_loaded, u64 *offset, u64 *stat)
612 new_data = rd32(hw, reg);
615 if (likely(new_data >= *offset))
616 *stat = (u32)(new_data - *offset);
617 else
618 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
622 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
623 * @hw: ptr to the hardware info
624 * @reg: the hw reg to read and clear
625 * @stat: ptr to the stat
627 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
629 u32 new_data = rd32(hw, reg);
631 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
632 *stat += new_data;
636 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
637 * @vsi: the VSI to be updated
639 void i40e_update_eth_stats(struct i40e_vsi *vsi)
641 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
642 struct i40e_pf *pf = vsi->back;
643 struct i40e_hw *hw = &pf->hw;
644 struct i40e_eth_stats *oes;
645 struct i40e_eth_stats *es; /* device's eth stats */
647 es = &vsi->eth_stats;
648 oes = &vsi->eth_stats_offsets;
650 /* Gather up the stats that the hw collects */
651 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
652 vsi->stat_offsets_loaded,
653 &oes->tx_errors, &es->tx_errors);
654 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
655 vsi->stat_offsets_loaded,
656 &oes->rx_discards, &es->rx_discards);
657 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
658 vsi->stat_offsets_loaded,
659 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
660 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
661 vsi->stat_offsets_loaded,
662 &oes->tx_errors, &es->tx_errors);
664 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
665 I40E_GLV_GORCL(stat_idx),
666 vsi->stat_offsets_loaded,
667 &oes->rx_bytes, &es->rx_bytes);
668 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
669 I40E_GLV_UPRCL(stat_idx),
670 vsi->stat_offsets_loaded,
671 &oes->rx_unicast, &es->rx_unicast);
672 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
673 I40E_GLV_MPRCL(stat_idx),
674 vsi->stat_offsets_loaded,
675 &oes->rx_multicast, &es->rx_multicast);
676 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
677 I40E_GLV_BPRCL(stat_idx),
678 vsi->stat_offsets_loaded,
679 &oes->rx_broadcast, &es->rx_broadcast);
681 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
682 I40E_GLV_GOTCL(stat_idx),
683 vsi->stat_offsets_loaded,
684 &oes->tx_bytes, &es->tx_bytes);
685 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
686 I40E_GLV_UPTCL(stat_idx),
687 vsi->stat_offsets_loaded,
688 &oes->tx_unicast, &es->tx_unicast);
689 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
690 I40E_GLV_MPTCL(stat_idx),
691 vsi->stat_offsets_loaded,
692 &oes->tx_multicast, &es->tx_multicast);
693 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
694 I40E_GLV_BPTCL(stat_idx),
695 vsi->stat_offsets_loaded,
696 &oes->tx_broadcast, &es->tx_broadcast);
697 vsi->stat_offsets_loaded = true;
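/* All of the GLV_* registers above are per-VSI counter banks indexed by
 * stat_counter_idx.  On the first pass (stat_offsets_loaded still false)
 * the raw values are captured as offsets; later passes report the delta,
 * so the VSI's ethernet stats appear to count from zero even though the
 * hardware counters are not cleared by a PF reset.
 */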
701 * i40e_update_veb_stats - Update Switch component statistics
702 * @veb: the VEB being updated
704 static void i40e_update_veb_stats(struct i40e_veb *veb)
706 struct i40e_pf *pf = veb->pf;
707 struct i40e_hw *hw = &pf->hw;
708 struct i40e_eth_stats *oes;
709 struct i40e_eth_stats *es; /* device's eth stats */
710 struct i40e_veb_tc_stats *veb_oes;
711 struct i40e_veb_tc_stats *veb_es;
714 idx = veb->stats_idx;
715 es = &veb->stats;
716 oes = &veb->stats_offsets;
717 veb_es = &veb->tc_stats;
718 veb_oes = &veb->tc_stats_offsets;
720 /* Gather up the stats that the hw collects */
721 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
722 veb->stat_offsets_loaded,
723 &oes->tx_discards, &es->tx_discards);
724 if (hw->revision_id > 0)
725 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
726 veb->stat_offsets_loaded,
727 &oes->rx_unknown_protocol,
728 &es->rx_unknown_protocol);
729 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
730 veb->stat_offsets_loaded,
731 &oes->rx_bytes, &es->rx_bytes);
732 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
733 veb->stat_offsets_loaded,
734 &oes->rx_unicast, &es->rx_unicast);
735 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
736 veb->stat_offsets_loaded,
737 &oes->rx_multicast, &es->rx_multicast);
738 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
739 veb->stat_offsets_loaded,
740 &oes->rx_broadcast, &es->rx_broadcast);
742 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
743 veb->stat_offsets_loaded,
744 &oes->tx_bytes, &es->tx_bytes);
745 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
746 veb->stat_offsets_loaded,
747 &oes->tx_unicast, &es->tx_unicast);
748 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
749 veb->stat_offsets_loaded,
750 &oes->tx_multicast, &es->tx_multicast);
751 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
752 veb->stat_offsets_loaded,
753 &oes->tx_broadcast, &es->tx_broadcast);
754 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
755 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
756 I40E_GLVEBTC_RPCL(i, idx),
757 veb->stat_offsets_loaded,
758 &veb_oes->tc_rx_packets[i],
759 &veb_es->tc_rx_packets[i]);
760 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
761 I40E_GLVEBTC_RBCL(i, idx),
762 veb->stat_offsets_loaded,
763 &veb_oes->tc_rx_bytes[i],
764 &veb_es->tc_rx_bytes[i]);
765 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
766 I40E_GLVEBTC_TPCL(i, idx),
767 veb->stat_offsets_loaded,
768 &veb_oes->tc_tx_packets[i],
769 &veb_es->tc_tx_packets[i]);
770 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
771 I40E_GLVEBTC_TBCL(i, idx),
772 veb->stat_offsets_loaded,
773 &veb_oes->tc_tx_bytes[i],
774 &veb_es->tc_tx_bytes[i]);
776 veb->stat_offsets_loaded = true;
780 * i40e_update_vsi_stats - Update the vsi statistics counters.
781 * @vsi: the VSI to be updated
783 * There are a few instances where we store the same stat in a
784 * couple of different structs. This is partly because we have
785 * the netdev stats that need to be filled out, which is slightly
786 * different from the "eth_stats" defined by the chip and used in
787 * VF communications. We sort it out here.
789 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
791 struct i40e_pf *pf = vsi->back;
792 struct rtnl_link_stats64 *ons;
793 struct rtnl_link_stats64 *ns; /* netdev stats */
794 struct i40e_eth_stats *oes;
795 struct i40e_eth_stats *es; /* device's eth stats */
796 u64 tx_restart, tx_busy;
807 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
808 test_bit(__I40E_CONFIG_BUSY, pf->state))
811 ns = i40e_get_vsi_stats_struct(vsi);
812 ons = &vsi->net_stats_offsets;
813 es = &vsi->eth_stats;
814 oes = &vsi->eth_stats_offsets;
816 /* Gather up the netdev and vsi stats that the driver collects
817 * on the fly during packet processing
821 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
825 for (q = 0; q < vsi->num_queue_pairs; q++) {
827 p = READ_ONCE(vsi->tx_rings[q]);
832 start = u64_stats_fetch_begin_irq(&p->syncp);
833 packets = p->stats.packets;
834 bytes = p->stats.bytes;
835 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
838 tx_restart += p->tx_stats.restart_queue;
839 tx_busy += p->tx_stats.tx_busy;
840 tx_linearize += p->tx_stats.tx_linearize;
841 tx_force_wb += p->tx_stats.tx_force_wb;
844 p = READ_ONCE(vsi->rx_rings[q]);
849 start = u64_stats_fetch_begin_irq(&p->syncp);
850 packets = p->stats.packets;
851 bytes = p->stats.bytes;
852 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
855 rx_buf += p->rx_stats.alloc_buff_failed;
856 rx_page += p->rx_stats.alloc_page_failed;
859 vsi->tx_restart = tx_restart;
860 vsi->tx_busy = tx_busy;
861 vsi->tx_linearize = tx_linearize;
862 vsi->tx_force_wb = tx_force_wb;
863 vsi->rx_page_failed = rx_page;
864 vsi->rx_buf_failed = rx_buf;
866 ns->rx_packets = rx_p;
868 ns->tx_packets = tx_p;
871 /* update netdev stats from eth stats */
872 i40e_update_eth_stats(vsi);
873 ons->tx_errors = oes->tx_errors;
874 ns->tx_errors = es->tx_errors;
875 ons->multicast = oes->rx_multicast;
876 ns->multicast = es->rx_multicast;
877 ons->rx_dropped = oes->rx_discards;
878 ns->rx_dropped = es->rx_discards;
879 ons->tx_dropped = oes->tx_discards;
880 ns->tx_dropped = es->tx_discards;
882 /* pull in a couple PF stats if this is the main vsi */
883 if (vsi == pf->vsi[pf->lan_vsi]) {
884 ns->rx_crc_errors = pf->stats.crc_errors;
885 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
886 ns->rx_length_errors = pf->stats.rx_length_errors;
891 * i40e_update_pf_stats - Update the PF statistics counters.
892 * @pf: the PF to be updated
894 static void i40e_update_pf_stats(struct i40e_pf *pf)
896 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
897 struct i40e_hw_port_stats *nsd = &pf->stats;
898 struct i40e_hw *hw = &pf->hw;
902 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
903 I40E_GLPRT_GORCL(hw->port),
904 pf->stat_offsets_loaded,
905 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
906 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
907 I40E_GLPRT_GOTCL(hw->port),
908 pf->stat_offsets_loaded,
909 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
910 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
911 pf->stat_offsets_loaded,
912 &osd->eth.rx_discards,
913 &nsd->eth.rx_discards);
914 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
915 I40E_GLPRT_UPRCL(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->eth.rx_unicast,
918 &nsd->eth.rx_unicast);
919 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
920 I40E_GLPRT_MPRCL(hw->port),
921 pf->stat_offsets_loaded,
922 &osd->eth.rx_multicast,
923 &nsd->eth.rx_multicast);
924 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
925 I40E_GLPRT_BPRCL(hw->port),
926 pf->stat_offsets_loaded,
927 &osd->eth.rx_broadcast,
928 &nsd->eth.rx_broadcast);
929 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
930 I40E_GLPRT_UPTCL(hw->port),
931 pf->stat_offsets_loaded,
932 &osd->eth.tx_unicast,
933 &nsd->eth.tx_unicast);
934 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
935 I40E_GLPRT_MPTCL(hw->port),
936 pf->stat_offsets_loaded,
937 &osd->eth.tx_multicast,
938 &nsd->eth.tx_multicast);
939 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
940 I40E_GLPRT_BPTCL(hw->port),
941 pf->stat_offsets_loaded,
942 &osd->eth.tx_broadcast,
943 &nsd->eth.tx_broadcast);
945 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
946 pf->stat_offsets_loaded,
947 &osd->tx_dropped_link_down,
948 &nsd->tx_dropped_link_down);
950 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
951 pf->stat_offsets_loaded,
952 &osd->crc_errors, &nsd->crc_errors);
954 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->illegal_bytes, &nsd->illegal_bytes);
958 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
959 pf->stat_offsets_loaded,
960 &osd->mac_local_faults,
961 &nsd->mac_local_faults);
962 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
963 pf->stat_offsets_loaded,
964 &osd->mac_remote_faults,
965 &nsd->mac_remote_faults);
967 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
968 pf->stat_offsets_loaded,
969 &osd->rx_length_errors,
970 &nsd->rx_length_errors);
972 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
973 pf->stat_offsets_loaded,
974 &osd->link_xon_rx, &nsd->link_xon_rx);
975 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->link_xon_tx, &nsd->link_xon_tx);
978 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
979 pf->stat_offsets_loaded,
980 &osd->link_xoff_rx, &nsd->link_xoff_rx);
981 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
982 pf->stat_offsets_loaded,
983 &osd->link_xoff_tx, &nsd->link_xoff_tx);
985 for (i = 0; i < 8; i++) {
986 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
987 pf->stat_offsets_loaded,
988 &osd->priority_xoff_rx[i],
989 &nsd->priority_xoff_rx[i]);
990 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
991 pf->stat_offsets_loaded,
992 &osd->priority_xon_rx[i],
993 &nsd->priority_xon_rx[i]);
994 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
995 pf->stat_offsets_loaded,
996 &osd->priority_xon_tx[i],
997 &nsd->priority_xon_tx[i]);
998 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
999 pf->stat_offsets_loaded,
1000 &osd->priority_xoff_tx[i],
1001 &nsd->priority_xoff_tx[i]);
1002 i40e_stat_update32(hw,
1003 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1004 pf->stat_offsets_loaded,
1005 &osd->priority_xon_2_xoff[i],
1006 &nsd->priority_xon_2_xoff[i]);
1009 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1010 I40E_GLPRT_PRC64L(hw->port),
1011 pf->stat_offsets_loaded,
1012 &osd->rx_size_64, &nsd->rx_size_64);
1013 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1014 I40E_GLPRT_PRC127L(hw->port),
1015 pf->stat_offsets_loaded,
1016 &osd->rx_size_127, &nsd->rx_size_127);
1017 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1018 I40E_GLPRT_PRC255L(hw->port),
1019 pf->stat_offsets_loaded,
1020 &osd->rx_size_255, &nsd->rx_size_255);
1021 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1022 I40E_GLPRT_PRC511L(hw->port),
1023 pf->stat_offsets_loaded,
1024 &osd->rx_size_511, &nsd->rx_size_511);
1025 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1026 I40E_GLPRT_PRC1023L(hw->port),
1027 pf->stat_offsets_loaded,
1028 &osd->rx_size_1023, &nsd->rx_size_1023);
1029 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1030 I40E_GLPRT_PRC1522L(hw->port),
1031 pf->stat_offsets_loaded,
1032 &osd->rx_size_1522, &nsd->rx_size_1522);
1033 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1034 I40E_GLPRT_PRC9522L(hw->port),
1035 pf->stat_offsets_loaded,
1036 &osd->rx_size_big, &nsd->rx_size_big);
1038 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1039 I40E_GLPRT_PTC64L(hw->port),
1040 pf->stat_offsets_loaded,
1041 &osd->tx_size_64, &nsd->tx_size_64);
1042 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1043 I40E_GLPRT_PTC127L(hw->port),
1044 pf->stat_offsets_loaded,
1045 &osd->tx_size_127, &nsd->tx_size_127);
1046 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1047 I40E_GLPRT_PTC255L(hw->port),
1048 pf->stat_offsets_loaded,
1049 &osd->tx_size_255, &nsd->tx_size_255);
1050 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1051 I40E_GLPRT_PTC511L(hw->port),
1052 pf->stat_offsets_loaded,
1053 &osd->tx_size_511, &nsd->tx_size_511);
1054 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1055 I40E_GLPRT_PTC1023L(hw->port),
1056 pf->stat_offsets_loaded,
1057 &osd->tx_size_1023, &nsd->tx_size_1023);
1058 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1059 I40E_GLPRT_PTC1522L(hw->port),
1060 pf->stat_offsets_loaded,
1061 &osd->tx_size_1522, &nsd->tx_size_1522);
1062 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1063 I40E_GLPRT_PTC9522L(hw->port),
1064 pf->stat_offsets_loaded,
1065 &osd->tx_size_big, &nsd->tx_size_big);
1067 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1068 pf->stat_offsets_loaded,
1069 &osd->rx_undersize, &nsd->rx_undersize);
1070 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1071 pf->stat_offsets_loaded,
1072 &osd->rx_fragments, &nsd->rx_fragments);
1073 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1074 pf->stat_offsets_loaded,
1075 &osd->rx_oversize, &nsd->rx_oversize);
1076 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1077 pf->stat_offsets_loaded,
1078 &osd->rx_jabber, &nsd->rx_jabber);
1081 i40e_stat_update_and_clear32(hw,
1082 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1083 &nsd->fd_atr_match);
1084 i40e_stat_update_and_clear32(hw,
1085 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1086 &nsd->fd_sb_match);
1087 i40e_stat_update_and_clear32(hw,
1088 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1089 &nsd->fd_atr_tunnel_match);
1091 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1092 nsd->tx_lpi_status =
1093 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1094 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1095 nsd->rx_lpi_status =
1096 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1097 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1098 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1099 pf->stat_offsets_loaded,
1100 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1101 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1102 pf->stat_offsets_loaded,
1103 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1105 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1106 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1107 nsd->fd_sb_status = true;
1109 nsd->fd_sb_status = false;
1111 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1112 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1113 nsd->fd_atr_status = true;
1115 nsd->fd_atr_status = false;
1117 pf->stat_offsets_loaded = true;
1121 * i40e_update_stats - Update the various statistics counters.
1122 * @vsi: the VSI to be updated
1124 * Update the various stats for this VSI and its related entities.
1126 void i40e_update_stats(struct i40e_vsi *vsi)
1128 struct i40e_pf *pf = vsi->back;
1130 if (vsi == pf->vsi[pf->lan_vsi])
1131 i40e_update_pf_stats(pf);
1133 i40e_update_vsi_stats(vsi);
1137 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1138 * @vsi: the VSI to be searched
1139 * @macaddr: the MAC address
1142 * Returns ptr to the filter object or NULL
1144 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1145 const u8 *macaddr, s16 vlan)
1147 struct i40e_mac_filter *f;
1150 if (!vsi || !macaddr)
1153 key = i40e_addr_to_hkey(macaddr);
1154 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1155 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1156 (vlan == f->vlan))
1157 return f;
1163 * i40e_find_mac - Find a mac addr in the macvlan filters list
1164 * @vsi: the VSI to be searched
1165 * @macaddr: the MAC address we are searching for
1167 * Returns the first filter with the provided MAC address or NULL if
1168 * MAC address was not found
1170 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1172 struct i40e_mac_filter *f;
1175 if (!vsi || !macaddr)
1178 key = i40e_addr_to_hkey(macaddr);
1179 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1180 if ((ether_addr_equal(macaddr, f->macaddr)))
1181 return f;
1187 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1188 * @vsi: the VSI to be searched
1190 * Returns true if VSI is in vlan mode or false otherwise
1192 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1194 /* If we have a PVID, always operate in VLAN mode */
1198 /* We need to operate in VLAN mode whenever we have any filters with
1199 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1200 * time, incurring search cost repeatedly. However, we can notice two
1201 * things:
1203 * 1) the only place where we can gain a VLAN filter is in
1204 * i40e_add_filter.
1206 * 2) the only place where filters are actually removed is in
1207 * i40e_sync_filters_subtask.
1209 * Thus, we can simply use a boolean value, has_vlan_filters which we
1210 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1211 * we have to perform the full search after deleting filters in
1212 * i40e_sync_filters_subtask, but we already have to search
1213 * filters here and can perform the check at the same time. This
1214 * results in avoiding embedding a loop for VLAN mode inside another
1215 * loop over all the filters, and should maintain correctness as noted
1218 return vsi->has_vlan_filter;
1222 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1223 * @vsi: the VSI to configure
1224 * @tmp_add_list: list of filters ready to be added
1225 * @tmp_del_list: list of filters ready to be deleted
1226 * @vlan_filters: the number of active VLAN filters
1228 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1229 * behave as expected. If we have any active VLAN filters remaining or about
1230 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1231 * so that they only match against untagged traffic. If we no longer have any
1232 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1233 * so that they match against both tagged and untagged traffic. In this way,
1234 * we ensure that we correctly receive the desired traffic. This ensures that
1235 * when we have an active VLAN we will receive only untagged traffic and
1236 * traffic matching active VLANs. If we have no active VLANs then we will
1237 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1239 * Finally, in a similar fashion, this function also corrects filters when
1240 * there is an active PVID assigned to this VSI.
1242 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1244 * This function is only expected to be called from within
1245 * i40e_sync_vsi_filters.
1247 * NOTE: This function expects to be called while under the
1248 * mac_filter_hash_lock
1250 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1251 struct hlist_head *tmp_add_list,
1252 struct hlist_head *tmp_del_list,
1255 s16 pvid = le16_to_cpu(vsi->info.pvid);
1256 struct i40e_mac_filter *f, *add_head;
1257 struct i40e_new_mac_filter *new;
1258 struct hlist_node *h;
1261 /* To determine if a particular filter needs to be replaced we
1262 * have the three following conditions:
1264 * a) if we have a PVID assigned, then all filters which are
1265 * not marked as VLAN=PVID must be replaced with filters that
1267 * b) otherwise, if we have any active VLANS, all filters
1268 * which are marked as VLAN=-1 must be replaced with
1269 * filters marked as VLAN=0
1270 * c) finally, if we do not have any active VLANS, all filters
1271 * which are marked as VLAN=0 must be replaced with filters
1275 /* Update the filters about to be added in place */
1276 hlist_for_each_entry(new, tmp_add_list, hlist) {
1277 if (pvid && new->f->vlan != pvid)
1278 new->f->vlan = pvid;
1279 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1280 new->f->vlan = 0;
1281 else if (!vlan_filters && new->f->vlan == 0)
1282 new->f->vlan = I40E_VLAN_ANY;
1285 /* Update the remaining active filters */
1286 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1287 /* Combine the checks for whether a filter needs to be changed
1288 * and then determine the new VLAN inside the if block, in
1289 * order to avoid duplicating code for adding the new filter
1290 * then deleting the old filter.
1292 if ((pvid && f->vlan != pvid) ||
1293 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1294 (!vlan_filters && f->vlan == 0)) {
1295 /* Determine the new vlan we will be adding */
1296 if (pvid)
1297 new_vlan = pvid;
1298 else if (vlan_filters)
1299 new_vlan = 0;
1300 else
1301 new_vlan = I40E_VLAN_ANY;
1303 /* Create the new filter */
1304 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1308 /* Create a temporary i40e_new_mac_filter */
1309 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1310 if (!new)
1311 return -ENOMEM;
1313 new->f = add_head;
1314 new->state = add_head->state;
1316 /* Add the new filter to the tmp list */
1317 hlist_add_head(&new->hlist, tmp_add_list);
1319 /* Put the original filter into the delete list */
1320 f->state = I40E_FILTER_REMOVE;
1321 hash_del(&f->hlist);
1322 hlist_add_head(&f->hlist, tmp_del_list);
1326 vsi->has_vlan_filter = !!vlan_filters;
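/* Example of the correction rules above: with a PVID of 100 on the VSI, a
 * filter for some MAC with VLAN -1 (I40E_VLAN_ANY) is re-added as VLAN 100
 * and the original entry is moved to the delete list; with no PVID but at
 * least one active VLAN filter, the same entry would instead be re-added
 * as VLAN 0 so that it only matches untagged traffic.
 */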
1332 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1333 * @vsi: the PF Main VSI - inappropriate for any other VSI
1334 * @macaddr: the MAC address
1336 * Remove whatever filter the firmware set up so the driver can manage
1337 * its own filtering intelligently.
1339 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1341 struct i40e_aqc_remove_macvlan_element_data element;
1342 struct i40e_pf *pf = vsi->back;
1344 /* Only appropriate for the PF main VSI */
1345 if (vsi->type != I40E_VSI_MAIN)
1348 memset(&element, 0, sizeof(element));
1349 ether_addr_copy(element.mac_addr, macaddr);
1350 element.vlan_tag = 0;
1351 /* Ignore error returns, some firmware does it this way... */
1352 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1353 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1355 memset(&element, 0, sizeof(element));
1356 ether_addr_copy(element.mac_addr, macaddr);
1357 element.vlan_tag = 0;
1358 /* ...and some firmware does it this way. */
1359 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1360 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1361 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1365 * i40e_add_filter - Add a mac/vlan filter to the VSI
1366 * @vsi: the VSI to be searched
1367 * @macaddr: the MAC address
1370 * Returns ptr to the filter object or NULL when no memory available.
1372 * NOTE: This function is expected to be called with mac_filter_hash_lock
1375 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1376 const u8 *macaddr, s16 vlan)
1378 struct i40e_mac_filter *f;
1381 if (!vsi || !macaddr)
1384 f = i40e_find_filter(vsi, macaddr, vlan);
1386 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1390 /* Update the boolean indicating if we need to function in
1391 * VLAN mode.
1392 */
1393 if (vlan >= 0)
1394 vsi->has_vlan_filter = true;
1396 ether_addr_copy(f->macaddr, macaddr);
1398 f->state = I40E_FILTER_NEW;
1399 INIT_HLIST_NODE(&f->hlist);
1401 key = i40e_addr_to_hkey(macaddr);
1402 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1404 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1405 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1408 /* If we're asked to add a filter that has been marked for removal, it
1409 * is safe to simply restore it to active state. __i40e_del_filter
1410 * will have simply deleted any filters which were previously marked
1411 * NEW or FAILED, so if it is currently marked REMOVE it must have
1412 * previously been ACTIVE. Since we haven't yet run the sync filters
1413 * task, just restore this filter to the ACTIVE state so that the
1414 * sync task leaves it in place
1416 if (f->state == I40E_FILTER_REMOVE)
1417 f->state = I40E_FILTER_ACTIVE;
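/* Filter life cycle as used here and in __i40e_del_filter():
 *   I40E_FILTER_NEW    - queued to be pushed to firmware by the sync task
 *   I40E_FILTER_ACTIVE - known to firmware
 *   I40E_FILTER_REMOVE - queued for deletion from firmware
 *   I40E_FILTER_FAILED - firmware rejected the add (e.g. out of resources)
 * NEW and FAILED entries never reached firmware, which is why they can be
 * freed directly instead of being marked REMOVE.
 */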
1423 * __i40e_del_filter - Remove a specific filter from the VSI
1424 * @vsi: VSI to remove from
1425 * @f: the filter to remove from the list
1427 * This function should be called instead of i40e_del_filter only if you know
1428 * the exact filter you will remove already, such as via i40e_find_filter or
1431 * NOTE: This function is expected to be called with mac_filter_hash_lock
1433 * ANOTHER NOTE: This function MUST be called from within the context of
1434 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1435 * instead of list_for_each_entry().
1437 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1442 /* If the filter was never added to firmware then we can just delete it
1443 * directly and we don't want to set the status to remove or else an
1444 * admin queue command will unnecessarily fire.
1446 if ((f->state == I40E_FILTER_FAILED) ||
1447 (f->state == I40E_FILTER_NEW)) {
1448 hash_del(&f->hlist);
1449 kfree(f);
1450 } else {
1451 f->state = I40E_FILTER_REMOVE;
1452 }
1454 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1455 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1459 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1460 * @vsi: the VSI to be searched
1461 * @macaddr: the MAC address
1464 * NOTE: This function is expected to be called with mac_filter_hash_lock
1466 * ANOTHER NOTE: This function MUST be called from within the context of
1467 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1468 * instead of list_for_each_entry().
1470 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1472 struct i40e_mac_filter *f;
1474 if (!vsi || !macaddr)
1477 f = i40e_find_filter(vsi, macaddr, vlan);
1478 __i40e_del_filter(vsi, f);
1482 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1483 * @vsi: the VSI to be searched
1484 * @macaddr: the mac address to be filtered
1486 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1487 * go through all the macvlan filters and add a macvlan filter for each
1488 * unique vlan that already exists. If a PVID has been assigned, instead only
1489 * add the macaddr to that VLAN.
1491 * Returns last filter added on success, else NULL
1493 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1496 struct i40e_mac_filter *f, *add = NULL;
1497 struct hlist_node *h;
1498 int bkt;
1500 if (vsi->info.pvid)
1501 return i40e_add_filter(vsi, macaddr,
1502 le16_to_cpu(vsi->info.pvid));
1504 if (!i40e_is_vsi_in_vlan(vsi))
1505 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1507 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1508 if (f->state == I40E_FILTER_REMOVE)
1509 continue;
1510 add = i40e_add_filter(vsi, macaddr, f->vlan);
1519 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1520 * @vsi: the VSI to be searched
1521 * @macaddr: the mac address to be removed
1523 * Removes a given MAC address from a VSI regardless of what VLAN it has been
1526 * Returns 0 for success, or error
1528 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1530 struct i40e_mac_filter *f;
1531 struct hlist_node *h;
1535 WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
1536 "Missing mac_filter_hash_lock\n");
1537 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1538 if (ether_addr_equal(macaddr, f->macaddr)) {
1539 __i40e_del_filter(vsi, f);
1551 * i40e_set_mac - NDO callback to set mac address
1552 * @netdev: network interface device structure
1553 * @p: pointer to an address structure
1555 * Returns 0 on success, negative on failure
1557 static int i40e_set_mac(struct net_device *netdev, void *p)
1559 struct i40e_netdev_priv *np = netdev_priv(netdev);
1560 struct i40e_vsi *vsi = np->vsi;
1561 struct i40e_pf *pf = vsi->back;
1562 struct i40e_hw *hw = &pf->hw;
1563 struct sockaddr *addr = p;
1565 if (!is_valid_ether_addr(addr->sa_data))
1566 return -EADDRNOTAVAIL;
1568 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1569 netdev_info(netdev, "already using mac address %pM\n",
1574 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
1575 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
1576 return -EADDRNOTAVAIL;
1578 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1579 netdev_info(netdev, "returning to hw mac address %pM\n",
1582 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1584 /* Copy the address first, so that we avoid a possible race with
1586 * - Remove old address from MAC filter
1587 * - Copy new address
1588 * - Add new address to MAC filter
1590 spin_lock_bh(&vsi->mac_filter_hash_lock);
1591 i40e_del_mac_filter(vsi, netdev->dev_addr);
1592 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1593 i40e_add_mac_filter(vsi, netdev->dev_addr);
1594 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1596 if (vsi->type == I40E_VSI_MAIN) {
1599 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1600 I40E_AQC_WRITE_TYPE_LAA_WOL,
1601 addr->sa_data, NULL);
1602 if (ret)
1603 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1604 i40e_stat_str(hw, ret),
1605 i40e_aq_str(hw, hw->aq.asq_last_status));
1608 /* schedule our worker thread which will take care of
1609 * applying the new filter changes
1611 i40e_service_event_schedule(vsi->back);
1616 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1617 * @vsi: vsi structure
1618 * @seed: RSS hash seed
1620 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1621 u8 *lut, u16 lut_size)
1623 struct i40e_pf *pf = vsi->back;
1624 struct i40e_hw *hw = &pf->hw;
1628 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1629 (struct i40e_aqc_get_set_rss_key_data *)seed;
1630 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1631 if (ret) {
1632 dev_info(&pf->pdev->dev,
1633 "Cannot set RSS key, err %s aq_err %s\n",
1634 i40e_stat_str(hw, ret),
1635 i40e_aq_str(hw, hw->aq.asq_last_status));
1640 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1642 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1643 if (ret) {
1644 dev_info(&pf->pdev->dev,
1645 "Cannot set RSS lut, err %s aq_err %s\n",
1646 i40e_stat_str(hw, ret),
1647 i40e_aq_str(hw, hw->aq.asq_last_status));
1655 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1656 * @vsi: VSI structure
1658 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1660 struct i40e_pf *pf = vsi->back;
1661 u8 seed[I40E_HKEY_ARRAY_SIZE];
1665 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1668 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1669 vsi->num_queue_pairs);
1672 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1676 /* Use the user configured hash keys and lookup table if there is one,
1677 * otherwise use default
1679 if (vsi->rss_lut_user)
1680 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1682 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1683 if (vsi->rss_hkey_user)
1684 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1686 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1687 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1693 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1694 * @vsi: the VSI being configured,
1695 * @ctxt: VSI context structure
1696 * @enabled_tc: number of traffic classes to enable
1698 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1700 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1701 struct i40e_vsi_context *ctxt,
1704 u16 qcount = 0, max_qcount, qmap, sections = 0;
1705 int i, override_q, pow, num_qps, ret;
1706 u8 netdev_tc = 0, offset = 0;
1708 if (vsi->type != I40E_VSI_MAIN)
1710 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1711 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1712 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1713 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1714 num_qps = vsi->mqprio_qopt.qopt.count[0];
1716 /* find the next higher power-of-2 of num queue pairs */
1717 pow = ilog2(num_qps);
1718 if (!is_power_of_2(num_qps))
1719 pow++;
1720 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1721 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
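/* qmap packs each TC's first queue and its queue count (as a power-of-two
 * exponent) into one 16-bit word.  For example, offset 0 with 8 queues is
 * (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT),
 * since the queue-number field carries log2 of the count.
 */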
1723 /* Setup queue offset/count for all TCs for given VSI */
1724 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1725 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1726 /* See if the given TC is enabled for the given VSI */
1727 if (vsi->tc_config.enabled_tc & BIT(i)) {
1728 offset = vsi->mqprio_qopt.qopt.offset[i];
1729 qcount = vsi->mqprio_qopt.qopt.count[i];
1730 if (qcount > max_qcount)
1731 max_qcount = qcount;
1732 vsi->tc_config.tc_info[i].qoffset = offset;
1733 vsi->tc_config.tc_info[i].qcount = qcount;
1734 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1736 /* TC is not enabled so set the offset to
1737 * default queue and allocate one queue
1740 vsi->tc_config.tc_info[i].qoffset = 0;
1741 vsi->tc_config.tc_info[i].qcount = 1;
1742 vsi->tc_config.tc_info[i].netdev_tc = 0;
1746 /* Set actual Tx/Rx queue pairs */
1747 vsi->num_queue_pairs = offset + qcount;
1749 /* Setup queue TC[0].qmap for given VSI context */
1750 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1751 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1752 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1753 ctxt->info.valid_sections |= cpu_to_le16(sections);
1755 /* Reconfigure RSS for main VSI with max queue count */
1756 vsi->rss_size = max_qcount;
1757 ret = i40e_vsi_config_rss(vsi);
1758 if (ret) {
1759 dev_info(&vsi->back->pdev->dev,
1760 "Failed to reconfig rss for num_queues (%u)\n",
1764 vsi->reconfig_rss = true;
1765 dev_dbg(&vsi->back->pdev->dev,
1766 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1768 /* Find queue count available for channel VSIs and starting offset
1771 override_q = vsi->mqprio_qopt.qopt.count[0];
1772 if (override_q && override_q < vsi->num_queue_pairs) {
1773 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1774 vsi->next_base_queue = override_q;
1780 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1781 * @vsi: the VSI being setup
1782 * @ctxt: VSI context structure
1783 * @enabled_tc: Enabled TCs bitmap
1784 * @is_add: True if called before Add VSI
1786 * Setup VSI queue mapping for enabled traffic classes.
1788 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1789 struct i40e_vsi_context *ctxt,
1793 struct i40e_pf *pf = vsi->back;
1803 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1806 if (vsi->type == I40E_VSI_MAIN) {
1807 /* This code helps add more queues to the VSI if we have
1808 * more cores than RSS can support; the higher cores will
1809 * be served by ATR or other filters. Furthermore, a
1810 * non-zero req_queue_pairs says that the user requested a new
1811 * queue count via ethtool's set_channels, so use this
1812 * value for queue distribution across traffic classes.
1813 * We need at least one queue pair for the interface
1814 * to be usable as we see in the else statement.
1816 if (vsi->req_queue_pairs > 0)
1817 vsi->num_queue_pairs = vsi->req_queue_pairs;
1818 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1819 vsi->num_queue_pairs = pf->num_lan_msix;
1821 vsi->num_queue_pairs = 1;
1824 /* Number of queues per enabled TC */
1825 if (vsi->type == I40E_VSI_MAIN)
1826 num_tc_qps = vsi->num_queue_pairs;
1828 num_tc_qps = vsi->alloc_queue_pairs;
1829 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1830 /* Find numtc from enabled TC bitmap */
1831 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1832 if (enabled_tc & BIT(i)) /* TC is enabled */
1836 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1839 num_tc_qps = num_tc_qps / numtc;
1840 num_tc_qps = min_t(int, num_tc_qps,
1841 i40e_pf_get_max_q_per_tc(pf));
1844 vsi->tc_config.numtc = numtc;
1845 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1847 /* Do not allow more TC queue pairs than there are MSI-X vectors */
1848 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1849 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1851 /* Setup queue offset/count for all TCs for given VSI */
1852 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1853 /* See if the given TC is enabled for the given VSI */
1854 if (vsi->tc_config.enabled_tc & BIT(i)) {
1858 switch (vsi->type) {
1859 case I40E_VSI_MAIN:
1860 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1861 I40E_FLAG_FD_ATR_ENABLED)) ||
1862 vsi->tc_config.enabled_tc != 1) {
1863 qcount = min_t(int, pf->alloc_rss_size,
1869 case I40E_VSI_SRIOV:
1870 case I40E_VSI_VMDQ2:
1871 default:
1872 qcount = num_tc_qps;
1876 vsi->tc_config.tc_info[i].qoffset = offset;
1877 vsi->tc_config.tc_info[i].qcount = qcount;
1879 /* find the next higher power-of-2 of num queue pairs */
1882 while (num_qps && (BIT_ULL(pow) < qcount)) {
1887 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1888 qmap =
1889 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1890 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1894 /* TC is not enabled so set the offset to
1895 * default queue and allocate one queue
1898 vsi->tc_config.tc_info[i].qoffset = 0;
1899 vsi->tc_config.tc_info[i].qcount = 1;
1900 vsi->tc_config.tc_info[i].netdev_tc = 0;
1904 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1906 /* Do not change previously set num_queue_pairs for PFs */
1907 if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
1908 vsi->type != I40E_VSI_MAIN)
1909 vsi->num_queue_pairs = offset;
1910 /* Scheduler section valid can only be set for ADD VSI */
1911 if (is_add)
1912 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1914 ctxt->info.up_enable_bits = enabled_tc;
1916 if (vsi->type == I40E_VSI_SRIOV) {
1917 ctxt->info.mapping_flags |=
1918 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1919 for (i = 0; i < vsi->num_queue_pairs; i++)
1920 ctxt->info.queue_mapping[i] =
1921 cpu_to_le16(vsi->base_queue + i);
1923 ctxt->info.mapping_flags |=
1924 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1925 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1927 ctxt->info.valid_sections |= cpu_to_le16(sections);
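/* SR-IOV VSIs are given an explicit per-queue (non-contiguous) mapping so
 * that every queue of the VF is listed individually; all other VSI types
 * only report their first queue and rely on the contiguous mapping flag to
 * cover the rest.
 */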
1931 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1932 * @netdev: the netdevice
1933 * @addr: address to add
1935 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1936 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1938 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1940 struct i40e_netdev_priv *np = netdev_priv(netdev);
1941 struct i40e_vsi *vsi = np->vsi;
1943 if (i40e_add_mac_filter(vsi, addr))
1950 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1951 * @netdev: the netdevice
1952 * @addr: address to add
1954 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1955 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1957 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1959 struct i40e_netdev_priv *np = netdev_priv(netdev);
1960 struct i40e_vsi *vsi = np->vsi;
1962 /* Under some circumstances, we might receive a request to delete
1963 * our own device address from our uc list. Because we store the
1964 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1965 * such requests and not delete our device address from this list.
1967 if (ether_addr_equal(addr, netdev->dev_addr))
1970 i40e_del_mac_filter(vsi, addr);
1976 * i40e_set_rx_mode - NDO callback to set the netdev filters
1977 * @netdev: network interface device structure
1979 static void i40e_set_rx_mode(struct net_device *netdev)
1981 struct i40e_netdev_priv *np = netdev_priv(netdev);
1982 struct i40e_vsi *vsi = np->vsi;
1984 spin_lock_bh(&vsi->mac_filter_hash_lock);
1986 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1987 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1989 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1991 /* check for other flag changes */
1992 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1993 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1994 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
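/* The pending bit set here is expected to be picked up by
 * i40e_sync_filters_subtask() from the service task, which then calls
 * i40e_sync_vsi_filters() for this VSI.
 */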
1999 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
2000 * @vsi: Pointer to VSI struct
2001 * @from: Pointer to list which contains MAC filter entries - changes to
2002 * those entries need to be undone.
2004 * MAC filter entries from this list were slated for deletion.
2006 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
2007 struct hlist_head *from)
2009 struct i40e_mac_filter *f;
2010 struct hlist_node *h;
2012 hlist_for_each_entry_safe(f, h, from, hlist) {
2013 u64 key = i40e_addr_to_hkey(f->macaddr);
2015 /* Move the element back into the MAC filter list */
2016 hlist_del(&f->hlist);
2017 hash_add(vsi->mac_filter_hash, &f->hlist, key);
2022 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
2023 * @vsi: Pointer to vsi struct
2024 * @from: Pointer to list which contains MAC filter entries - changes to
2025 * those entries need to be undone.
2027 * MAC filter entries from this list were slated for addition.
2029 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
2030 struct hlist_head *from)
2032 struct i40e_new_mac_filter *new;
2033 struct hlist_node *h;
2035 hlist_for_each_entry_safe(new, h, from, hlist) {
2036 /* We can simply free the wrapper structure */
2037 hlist_del(&new->hlist);
2038 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2044 * i40e_next_entry - Get the next non-broadcast filter from a list
2045 * @next: pointer to filter in list
2047 * Returns the next non-broadcast filter in the list. Required so that we
2048 * ignore broadcast filters within the list, since these are not handled via
2049 * the normal firmware update path.
2052 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2054 hlist_for_each_entry_continue(next, hlist) {
2055 if (!is_broadcast_ether_addr(next->f->macaddr))
2063 * i40e_update_filter_state - Update filter state based on return data
2065 * @count: Number of filters added
2066 * @add_list: return data from fw
2067 * @add_head: pointer to first filter in current batch
2069 * MAC filter entries from this list were slated to be added to the device.
2070 * Returns the number of successful filters. Note that 0 does NOT mean success!
2073 i40e_update_filter_state(int count,
2074 struct i40e_aqc_add_macvlan_element_data *add_list,
2075 struct i40e_new_mac_filter *add_head)
2080 for (i = 0; i < count; i++) {
2081 /* Always check status of each filter. We don't need to check
2082 * the firmware return status because we pre-set the filter
2083 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2084 * request to the adminq. Thus, if it no longer matches then
2085 * we know the filter is active.
2087 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2088 add_head->state = I40E_FILTER_FAILED;
2090 add_head->state = I40E_FILTER_ACTIVE;
2094 add_head = i40e_next_filter(add_head);
2103 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2104 * @vsi: ptr to the VSI
2105 * @vsi_name: name to display in messages
2106 * @list: the list of filters to send to firmware
2107 * @num_del: the number of filters to delete
2108 * @retval: Set to -EIO on failure to delete
2110 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2111 * *retval instead of a return value so that success does not force ret_val to
2112 * be set to 0. This ensures that a sequence of calls to this function
2113 * preserve the previous value of *retval on successful delete.
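*
* Illustrative usage sketch (added; batch1/batch2 and n1/n2 are hypothetical
* placeholders) showing how the *retval convention lets a caller chain
* several delete batches and still observe the first failure:
*
*	int retval = 0;
*
*	i40e_aqc_del_filters(vsi, vsi_name, batch1, n1, &retval);
*	i40e_aqc_del_filters(vsi, vsi_name, batch2, n2, &retval);
*	if (retval)
*		report_delete_failure(vsi);
*
* where report_delete_failure() is a hypothetical error-handling helper.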
2116 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2117 struct i40e_aqc_remove_macvlan_element_data *list,
2118 int num_del, int *retval)
2120 struct i40e_hw *hw = &vsi->back->hw;
2124 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2125 aq_err = hw->aq.asq_last_status;
2127 /* Explicitly ignore and do not report when firmware returns ENOENT */
2128 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2130 dev_info(&vsi->back->pdev->dev,
2131 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2132 vsi_name, i40e_stat_str(hw, aq_ret),
2133 i40e_aq_str(hw, aq_err));
2138 * i40e_aqc_add_filters - Request firmware to add a set of filters
2139 * @vsi: ptr to the VSI
2140 * @vsi_name: name to display in messages
2141 * @list: the list of filters to send to firmware
2142 * @add_head: Position in the add hlist
2143 * @num_add: the number of filters to add
2145 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2146 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2147 * space for more filters.
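*
* Once that bit is set, the next i40e_sync_vsi_filters() pass sees the
* old/new overflow mismatch and falls back to promiscuous mode through
* i40e_set_promiscuous(), so matching traffic keeps flowing.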
2150 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2151 struct i40e_aqc_add_macvlan_element_data *list,
2152 struct i40e_new_mac_filter *add_head,
2155 struct i40e_hw *hw = &vsi->back->hw;
2158 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2159 aq_err = hw->aq.asq_last_status;
2160 fcnt = i40e_update_filter_state(num_add, list, add_head);
2162 if (fcnt != num_add) {
2163 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2164 dev_warn(&vsi->back->pdev->dev,
2165 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2166 i40e_aq_str(hw, aq_err),
2172 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2173 * @vsi: pointer to the VSI
2174 * @vsi_name: the VSI name
2177 * This function sets or clears the promiscuous broadcast flags for VLAN
2178 * filters in order to properly receive broadcast frames. Assumes that only
2179 * broadcast filters are passed.
2181 * Returns status indicating success or failure.
2184 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2185 struct i40e_mac_filter *f)
2187 bool enable = f->state == I40E_FILTER_NEW;
2188 struct i40e_hw *hw = &vsi->back->hw;
2191 if (f->vlan == I40E_VLAN_ANY) {
2192 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2197 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2205 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2206 dev_warn(&vsi->back->pdev->dev,
2207 "Error %s, forcing overflow promiscuous on %s\n",
2208 i40e_aq_str(hw, hw->aq.asq_last_status),
2216 * i40e_set_promiscuous - set promiscuous mode
2217 * @pf: board private structure
2218 * @promisc: promisc on or off
2220 * There are different ways of setting promiscuous mode on a PF depending on
2221 * what state/environment we're in. This identifies and sets it appropriately.
2222 * Returns 0 on success.
2224 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2226 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2227 struct i40e_hw *hw = &pf->hw;
2230 if (vsi->type == I40E_VSI_MAIN &&
2231 pf->lan_veb != I40E_NO_VEB &&
2232 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2233 /* set defport ON for Main VSI instead of true promisc
2234 * this way we will get all unicast/multicast and VLAN
2235 * promisc behavior but will not get VF or VMDq traffic
2236 * replicated on the Main VSI.
2239 aq_ret = i40e_aq_set_default_vsi(hw,
2243 aq_ret = i40e_aq_clear_default_vsi(hw,
2247 dev_info(&pf->pdev->dev,
2248 "Set default VSI failed, err %s, aq_err %s\n",
2249 i40e_stat_str(hw, aq_ret),
2250 i40e_aq_str(hw, hw->aq.asq_last_status));
2253 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2259 dev_info(&pf->pdev->dev,
2260 "set unicast promisc failed, err %s, aq_err %s\n",
2261 i40e_stat_str(hw, aq_ret),
2262 i40e_aq_str(hw, hw->aq.asq_last_status));
2264 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2269 dev_info(&pf->pdev->dev,
2270 "set multicast promisc failed, err %s, aq_err %s\n",
2271 i40e_stat_str(hw, aq_ret),
2272 i40e_aq_str(hw, hw->aq.asq_last_status));
2277 pf->cur_promisc = promisc;
2283 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2284 * @vsi: ptr to the VSI
2286 * Push any outstanding VSI filter changes through the AdminQ.
2288 * Returns 0 or error value
2290 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2292 struct hlist_head tmp_add_list, tmp_del_list;
2293 struct i40e_mac_filter *f;
2294 struct i40e_new_mac_filter *new, *add_head = NULL;
2295 struct i40e_hw *hw = &vsi->back->hw;
2296 bool old_overflow, new_overflow;
2297 unsigned int failed_filters = 0;
2298 unsigned int vlan_filters = 0;
2299 char vsi_name[16] = "PF";
2300 int filter_list_len = 0;
2301 i40e_status aq_ret = 0;
2302 u32 changed_flags = 0;
2303 struct hlist_node *h;
2312 /* empty array typed pointers, kcalloc later */
2313 struct i40e_aqc_add_macvlan_element_data *add_list;
2314 struct i40e_aqc_remove_macvlan_element_data *del_list;
2316 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2317 usleep_range(1000, 2000);
2320 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2323 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2324 vsi->current_netdev_flags = vsi->netdev->flags;
2327 INIT_HLIST_HEAD(&tmp_add_list);
2328 INIT_HLIST_HEAD(&tmp_del_list);
2330 if (vsi->type == I40E_VSI_SRIOV)
2331 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2332 else if (vsi->type != I40E_VSI_MAIN)
2333 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2335 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2336 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2338 spin_lock_bh(&vsi->mac_filter_hash_lock);
2339 /* Create a list of filters to delete. */
2340 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2341 if (f->state == I40E_FILTER_REMOVE) {
2342 /* Move the element into temporary del_list */
2343 hash_del(&f->hlist);
2344 hlist_add_head(&f->hlist, &tmp_del_list);
2346 /* Avoid counting removed filters */
2349 if (f->state == I40E_FILTER_NEW) {
2350 /* Create a temporary i40e_new_mac_filter */
2351 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2353 goto err_no_memory_locked;
2355 /* Store pointer to the real filter */
2357 new->state = f->state;
2359 /* Add it to the hash list */
2360 hlist_add_head(&new->hlist, &tmp_add_list);
2363 /* Count the number of active (current and new) VLAN
2364 * filters we have now. Does not count filters which
2365 * are marked for deletion.
2371 retval = i40e_correct_mac_vlan_filters(vsi,
2376 hlist_for_each_entry(new, &tmp_add_list, hlist)
2377 netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
2380 goto err_no_memory_locked;
2382 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2385 /* Now process 'del_list' outside the lock */
2386 if (!hlist_empty(&tmp_del_list)) {
2387 filter_list_len = hw->aq.asq_buf_size /
2388 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2389 list_size = filter_list_len *
2390 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2391 del_list = kzalloc(list_size, GFP_ATOMIC);
2395 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2398 /* handle broadcast filters by updating the broadcast
2399 * promiscuous flag and releasing the filter from the list.
2401 if (is_broadcast_ether_addr(f->macaddr)) {
2402 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2404 hlist_del(&f->hlist);
2409 /* add to delete list */
2410 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2411 if (f->vlan == I40E_VLAN_ANY) {
2412 del_list[num_del].vlan_tag = 0;
2413 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2415 del_list[num_del].vlan_tag =
2416 cpu_to_le16((u16)(f->vlan));
2419 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2420 del_list[num_del].flags = cmd_flags;
2423 /* flush a full buffer */
2424 if (num_del == filter_list_len) {
2425 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2427 memset(del_list, 0, list_size);
2430 /* Release memory for MAC filter entries which were
2431 * synced up with HW.
2433 hlist_del(&f->hlist);
2438 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2446 if (!hlist_empty(&tmp_add_list)) {
2447 /* Do all the adds now. */
2448 filter_list_len = hw->aq.asq_buf_size /
2449 sizeof(struct i40e_aqc_add_macvlan_element_data);
2450 list_size = filter_list_len *
2451 sizeof(struct i40e_aqc_add_macvlan_element_data);
2452 add_list = kzalloc(list_size, GFP_ATOMIC);
2457 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2458 /* handle broadcast filters by updating the broadcast
2459 * promiscuous flag instead of adding a MAC filter.
2461 if (is_broadcast_ether_addr(new->f->macaddr)) {
2462 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2464 new->state = I40E_FILTER_FAILED;
2466 new->state = I40E_FILTER_ACTIVE;
2470 /* add to add array */
2474 ether_addr_copy(add_list[num_add].mac_addr,
2476 if (new->f->vlan == I40E_VLAN_ANY) {
2477 add_list[num_add].vlan_tag = 0;
2478 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2480 add_list[num_add].vlan_tag =
2481 cpu_to_le16((u16)(new->f->vlan));
2483 add_list[num_add].queue_number = 0;
2484 /* set invalid match method for later detection */
2485 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2486 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2487 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2490 /* flush a full buffer */
2491 if (num_add == filter_list_len) {
2492 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2494 memset(add_list, 0, list_size);
2499 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2502 /* Now move all of the filters from the temp add list back to
2505 spin_lock_bh(&vsi->mac_filter_hash_lock);
2506 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2507 /* Only update the state if we're still NEW */
2508 if (new->f->state == I40E_FILTER_NEW)
2509 new->f->state = new->state;
2510 hlist_del(&new->hlist);
2511 netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
2514 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2519 /* Determine the number of active and failed filters. */
2520 spin_lock_bh(&vsi->mac_filter_hash_lock);
2521 vsi->active_filters = 0;
2522 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2523 if (f->state == I40E_FILTER_ACTIVE)
2524 vsi->active_filters++;
2525 else if (f->state == I40E_FILTER_FAILED)
2528 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2530 /* Check if we are able to exit overflow promiscuous mode. We can
2531 * safely exit if we didn't just enter, we no longer have any failed
2532 * filters, and we have reduced filters below the threshold value.
2534 if (old_overflow && !failed_filters &&
2535 vsi->active_filters < vsi->promisc_threshold) {
2536 dev_info(&pf->pdev->dev,
2537 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2539 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2540 vsi->promisc_threshold = 0;
2543 /* if the VF is not trusted do not do promisc */
2544 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2545 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2549 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2551 /* If we are entering overflow promiscuous, we need to calculate a new
2552 * threshold for when we are safe to exit
2554 if (!old_overflow && new_overflow)
2555 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
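/* Worked example (added): if 100 filters were active when overflow
 * promiscuous was entered, the threshold becomes 75, so overflow mode is
 * only left once no filters are failed and the active count drops below 75.
 */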
2557 /* check for changes in promiscuous modes */
2558 if (changed_flags & IFF_ALLMULTI) {
2559 bool cur_multipromisc;
2561 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2562 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2567 retval = i40e_aq_rc_to_posix(aq_ret,
2568 hw->aq.asq_last_status);
2569 dev_info(&pf->pdev->dev,
2570 "set multi promisc failed on %s, err %s aq_err %s\n",
2572 i40e_stat_str(hw, aq_ret),
2573 i40e_aq_str(hw, hw->aq.asq_last_status));
2577 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2580 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2582 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2584 retval = i40e_aq_rc_to_posix(aq_ret,
2585 hw->aq.asq_last_status);
2586 dev_info(&pf->pdev->dev,
2587 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2588 cur_promisc ? "on" : "off",
2590 i40e_stat_str(hw, aq_ret),
2591 i40e_aq_str(hw, hw->aq.asq_last_status));
2595 /* if something went wrong then set the changed flag so we try again */
2597 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2599 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2603 /* Restore elements on the temporary add and delete lists */
2604 spin_lock_bh(&vsi->mac_filter_hash_lock);
2605 err_no_memory_locked:
2606 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2607 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2608 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2610 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2611 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2616 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2617 * @pf: board private structure
2619 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2625 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2627 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2628 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2632 for (v = 0; v < pf->num_alloc_vsi; v++) {
2634 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
2635 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
2636 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2639 /* come back and try again later */
2640 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2649 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2652 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2654 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2655 return I40E_RXBUFFER_2048;
2657 return I40E_RXBUFFER_3072;
2662 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
2662 * @netdev: network interface device structure
2663 * @new_mtu: new value for maximum frame size
2665 * Returns 0 on success, negative on failure
2667 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2669 struct i40e_netdev_priv *np = netdev_priv(netdev);
2670 struct i40e_vsi *vsi = np->vsi;
2671 struct i40e_pf *pf = vsi->back;
2673 if (i40e_enabled_xdp_vsi(vsi)) {
2674 int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
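/* Sketch of the check below (assuming I40E_PACKET_HDR_PAD covers the L2
 * header, VLAN tags and FCS): a standard 1500 byte MTU fits even the
 * smaller 2048 byte XDP buffer, so only larger-than-standard MTUs can be
 * rejected here.
 */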
2676 if (frame_size > i40e_max_xdp_frame_size(vsi))
2680 netdev_info(netdev, "changing MTU from %d to %d\n",
2681 netdev->mtu, new_mtu);
2682 netdev->mtu = new_mtu;
2683 if (netif_running(netdev))
2684 i40e_vsi_reinit_locked(vsi);
2685 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2686 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2691 * i40e_ioctl - Access the hwtstamp interface
2692 * @netdev: network interface device structure
2693 * @ifr: interface request data
2694 * @cmd: ioctl command
2696 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2698 struct i40e_netdev_priv *np = netdev_priv(netdev);
2699 struct i40e_pf *pf = np->vsi->back;
2703 return i40e_ptp_get_ts_config(pf, ifr);
2705 return i40e_ptp_set_ts_config(pf, ifr);
2712 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2713 * @vsi: the vsi being adjusted
2715 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2717 struct i40e_vsi_context ctxt;
2720 /* Don't modify stripping options if a port VLAN is active */
2724 if ((vsi->info.valid_sections &
2725 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2726 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2727 return; /* already enabled */
2729 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2730 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2731 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2733 ctxt.seid = vsi->seid;
2734 ctxt.info = vsi->info;
2735 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2737 dev_info(&vsi->back->pdev->dev,
2738 "update vlan stripping failed, err %s aq_err %s\n",
2739 i40e_stat_str(&vsi->back->hw, ret),
2740 i40e_aq_str(&vsi->back->hw,
2741 vsi->back->hw.aq.asq_last_status));
2746 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2747 * @vsi: the vsi being adjusted
2749 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2751 struct i40e_vsi_context ctxt;
2754 /* Don't modify stripping options if a port VLAN is active */
2758 if ((vsi->info.valid_sections &
2759 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2760 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2761 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2762 return; /* already disabled */
2764 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2765 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2766 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2768 ctxt.seid = vsi->seid;
2769 ctxt.info = vsi->info;
2770 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2772 dev_info(&vsi->back->pdev->dev,
2773 "update vlan stripping failed, err %s aq_err %s\n",
2774 i40e_stat_str(&vsi->back->hw, ret),
2775 i40e_aq_str(&vsi->back->hw,
2776 vsi->back->hw.aq.asq_last_status));
2781 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2782 * @vsi: the vsi being configured
2783 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2785 * This is a helper function for adding a new MAC/VLAN filter with the
2786 * specified VLAN for each existing MAC address already in the hash table.
2787 * This function does *not* perform any accounting to update filters based on
2790 * NOTE: this function expects to be called while under the
2791 * mac_filter_hash_lock
2793 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2795 struct i40e_mac_filter *f, *add_f;
2796 struct hlist_node *h;
2799 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2800 if (f->state == I40E_FILTER_REMOVE)
2802 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2804 dev_info(&vsi->back->pdev->dev,
2805 "Could not add vlan filter %d for %pM\n",
2815 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2816 * @vsi: the VSI being configured
2817 * @vid: VLAN id to be added
2819 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2826 /* The network stack will attempt to add VID=0, with the intention to
2827 * receive priority tagged packets with a VLAN of 0. Our HW receives
2828 * these packets by default when configured to receive untagged
2829 * packets, so we don't need to add a filter for this case.
2830 * Additionally, HW interprets adding a VID=0 filter as meaning to
2831 * receive *only* tagged traffic and stops receiving untagged traffic.
2832 * Thus, we do not want to actually add a filter for VID=0
2837 /* Locked once because all functions invoked below iterate the list */
2838 spin_lock_bh(&vsi->mac_filter_hash_lock);
2839 err = i40e_add_vlan_all_mac(vsi, vid);
2840 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2844 /* schedule our worker thread which will take care of
2845 * applying the new filter changes
2847 i40e_service_event_schedule(vsi->back);
2852 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2853 * @vsi: the vsi being configured
2854 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2856 * This function should be used to remove all VLAN filters which match the
2857 * given VID. It does not schedule the service event and does not take the
2858 * mac_filter_hash_lock so it may be combined with other operations under
2859 * a single invocation of the mac_filter_hash_lock.
2861 * NOTE: this function expects to be called while under the
2862 * mac_filter_hash_lock
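*
* A minimal usage sketch, mirroring what i40e_vsi_kill_vlan() below does:
*
*	spin_lock_bh(&vsi->mac_filter_hash_lock);
*	i40e_rm_vlan_all_mac(vsi, vid);
*	spin_unlock_bh(&vsi->mac_filter_hash_lock);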
2864 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2866 struct i40e_mac_filter *f;
2867 struct hlist_node *h;
2870 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2872 __i40e_del_filter(vsi, f);
2877 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2878 * @vsi: the VSI being configured
2879 * @vid: VLAN id to be removed
2881 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2883 if (!vid || vsi->info.pvid)
2886 spin_lock_bh(&vsi->mac_filter_hash_lock);
2887 i40e_rm_vlan_all_mac(vsi, vid);
2888 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2890 /* schedule our worker thread which will take care of
2891 * applying the new filter changes
2893 i40e_service_event_schedule(vsi->back);
2897 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2898 * @netdev: network interface to be adjusted
2899 * @proto: unused protocol value
2900 * @vid: vlan id to be added
2902 * net_device_ops implementation for adding vlan ids
2904 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2905 __always_unused __be16 proto, u16 vid)
2907 struct i40e_netdev_priv *np = netdev_priv(netdev);
2908 struct i40e_vsi *vsi = np->vsi;
2911 if (vid >= VLAN_N_VID)
2914 ret = i40e_vsi_add_vlan(vsi, vid);
2916 set_bit(vid, vsi->active_vlans);
2922 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2923 * @netdev: network interface to be adjusted
2924 * @proto: unused protocol value
2925 * @vid: vlan id to be added
2927 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2928 __always_unused __be16 proto, u16 vid)
2930 struct i40e_netdev_priv *np = netdev_priv(netdev);
2931 struct i40e_vsi *vsi = np->vsi;
2933 if (vid >= VLAN_N_VID)
2935 set_bit(vid, vsi->active_vlans);
2939 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2940 * @netdev: network interface to be adjusted
2941 * @proto: unused protocol value
2942 * @vid: vlan id to be removed
2944 * net_device_ops implementation for removing vlan ids
2946 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2947 __always_unused __be16 proto, u16 vid)
2949 struct i40e_netdev_priv *np = netdev_priv(netdev);
2950 struct i40e_vsi *vsi = np->vsi;
2952 /* return code is ignored as there is nothing a user
2953 * can do about failure to remove and a log message was
2954 * already printed by the other function
2956 i40e_vsi_kill_vlan(vsi, vid);
2958 clear_bit(vid, vsi->active_vlans);
2964 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2965 * @vsi: the vsi being brought back up
2967 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2974 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2975 i40e_vlan_stripping_enable(vsi);
2977 i40e_vlan_stripping_disable(vsi);
2979 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2980 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2985 * i40e_vsi_add_pvid - Add pvid for the VSI
2986 * @vsi: the vsi being adjusted
2987 * @vid: the vlan id to set as a PVID
2989 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2991 struct i40e_vsi_context ctxt;
2994 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2995 vsi->info.pvid = cpu_to_le16(vid);
2996 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2997 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2998 I40E_AQ_VSI_PVLAN_EMOD_STR;
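/* Descriptive note (added, inferred from the flag names): INSERT_PVID makes
 * the hardware insert the PVID on transmit, EMOD_STR strips the tag again on
 * receive, and MODE_TAGGED restricts the VSI to traffic carrying that tag,
 * giving the usual "transparent" port-VLAN behaviour.
 */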
3000 ctxt.seid = vsi->seid;
3001 ctxt.info = vsi->info;
3002 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3004 dev_info(&vsi->back->pdev->dev,
3005 "add pvid failed, err %s aq_err %s\n",
3006 i40e_stat_str(&vsi->back->hw, ret),
3007 i40e_aq_str(&vsi->back->hw,
3008 vsi->back->hw.aq.asq_last_status));
3016 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3017 * @vsi: the vsi being adjusted
3019 * Just use the vlan_rx_register() service to put it back to normal
3021 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
3023 i40e_vlan_stripping_disable(vsi);
3029 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3030 * @vsi: ptr to the VSI
3032 * If this function returns with an error, then it's possible one or
3033 * more of the rings is populated (while the rest are not). It is the
3034 * caller's duty to clean those orphaned rings.
3036 * Return 0 on success, negative on failure
3038 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
3042 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3043 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
3045 if (!i40e_enabled_xdp_vsi(vsi))
3048 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3049 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3055 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3056 * @vsi: ptr to the VSI
3058 * Free VSI's transmit software resources
3060 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3064 if (vsi->tx_rings) {
3065 for (i = 0; i < vsi->num_queue_pairs; i++)
3066 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3067 i40e_free_tx_resources(vsi->tx_rings[i]);
3070 if (vsi->xdp_rings) {
3071 for (i = 0; i < vsi->num_queue_pairs; i++)
3072 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3073 i40e_free_tx_resources(vsi->xdp_rings[i]);
3078 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3079 * @vsi: ptr to the VSI
3081 * If this function returns with an error, then it's possible one or
3082 * more of the rings is populated (while the rest are not). It is the
3083 * caller's duty to clean those orphaned rings.
3085 * Return 0 on success, negative on failure
3087 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3091 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3092 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3097 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3098 * @vsi: ptr to the VSI
3100 * Free all receive software resources
3102 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3109 for (i = 0; i < vsi->num_queue_pairs; i++)
3110 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3111 i40e_free_rx_resources(vsi->rx_rings[i]);
3115 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3116 * @ring: The Tx ring to configure
3118 * This enables/disables XPS for a given Tx descriptor ring
3119 * based on the TCs enabled for the VSI that ring belongs to.
3121 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3125 if (!ring->q_vector || !ring->netdev || ring->ch)
3128 /* We only initialize XPS once, so as not to overwrite user settings */
3129 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3132 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3133 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3138 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3139 * @ring: The Tx ring to configure
3141 * Configure the Tx descriptor ring in the HMC context.
3143 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3145 struct i40e_vsi *vsi = ring->vsi;
3146 u16 pf_q = vsi->base_queue + ring->queue_index;
3147 struct i40e_hw *hw = &vsi->back->hw;
3148 struct i40e_hmc_obj_txq tx_ctx;
3149 i40e_status err = 0;
3152 /* some ATR related tx ring init */
3153 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3154 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3155 ring->atr_count = 0;
3157 ring->atr_sample_rate = 0;
3161 i40e_config_xps_tx_ring(ring);
3163 /* clear the context structure first */
3164 memset(&tx_ctx, 0, sizeof(tx_ctx));
3166 tx_ctx.new_context = 1;
3167 tx_ctx.base = (ring->dma / 128);
3168 tx_ctx.qlen = ring->count;
3169 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3170 I40E_FLAG_FD_ATR_ENABLED));
3171 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3172 /* FDIR VSI tx ring can still use RS bit and writebacks */
3173 if (vsi->type != I40E_VSI_FDIR)
3174 tx_ctx.head_wb_ena = 1;
3175 tx_ctx.head_wb_addr = ring->dma +
3176 (ring->count * sizeof(struct i40e_tx_desc));
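/* Descriptive note (added): the head write-back area is placed directly
 * behind the last descriptor, so the ring allocation is expected to reserve
 * room for it and no separate DMA mapping is needed.
 */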
3178 /* As part of VSI creation/update, FW allocates certain
3179 * Tx arbitration queue sets for each TC enabled for
3180 * the VSI. The FW returns the handles to these queue
3181 * sets as part of the response buffer to Add VSI,
3182 * Update VSI, etc. AQ commands. It is expected that
3183 * these queue set handles be associated with the Tx
3184 * queues by the driver as part of the TX queue context
3185 * initialization. This has to be done regardless of
3186 * DCB as by default everything is mapped to TC0.
3191 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3194 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3196 tx_ctx.rdylist_act = 0;
3198 /* clear the context in the HMC */
3199 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3201 dev_info(&vsi->back->pdev->dev,
3202 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3203 ring->queue_index, pf_q, err);
3207 /* set the context in the HMC */
3208 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3210 dev_info(&vsi->back->pdev->dev,
3211 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3212 ring->queue_index, pf_q, err);
3216 /* Now associate this queue with this PCI function */
3218 if (ring->ch->type == I40E_VSI_VMDQ2)
3219 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3223 qtx_ctl |= (ring->ch->vsi_number <<
3224 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3225 I40E_QTX_CTL_VFVM_INDX_MASK;
3227 if (vsi->type == I40E_VSI_VMDQ2) {
3228 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3229 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3230 I40E_QTX_CTL_VFVM_INDX_MASK;
3232 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3236 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3237 I40E_QTX_CTL_PF_INDX_MASK);
3238 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3241 /* cache tail off for easier writes later */
3242 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3248 * i40e_configure_rx_ring - Configure a receive ring context
3249 * @ring: The Rx ring to configure
3251 * Configure the Rx descriptor ring in the HMC context.
3253 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3255 struct i40e_vsi *vsi = ring->vsi;
3256 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3257 u16 pf_q = vsi->base_queue + ring->queue_index;
3258 struct i40e_hw *hw = &vsi->back->hw;
3259 struct i40e_hmc_obj_rxq rx_ctx;
3260 i40e_status err = 0;
3262 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3264 /* clear the context structure first */
3265 memset(&rx_ctx, 0, sizeof(rx_ctx));
3267 ring->rx_buf_len = vsi->rx_buf_len;
3269 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3270 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3272 rx_ctx.base = (ring->dma / 128);
3273 rx_ctx.qlen = ring->count;
3275 /* use 32 byte descriptors */
3278 /* descriptor type is always zero
3281 rx_ctx.hsplit_0 = 0;
3283 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
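/* Worked example (illustrative): with a 2048 byte rx_buf_len and a buffer
 * chain capability of 5, the hardware could chain up to 10240 bytes, but
 * rxmax stays clamped to vsi->max_frame so frames above the configured MTU
 * are still rejected.
 */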
3284 if (hw->revision_id == 0)
3285 rx_ctx.lrxqthresh = 0;
3287 rx_ctx.lrxqthresh = 1;
3288 rx_ctx.crcstrip = 1;
3290 /* this controls whether VLAN is stripped from inner headers */
3292 /* set the prefena field to 1 because the manual says to */
3295 /* clear the context in the HMC */
3296 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3298 dev_info(&vsi->back->pdev->dev,
3299 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3300 ring->queue_index, pf_q, err);
3304 /* set the context in the HMC */
3305 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3307 dev_info(&vsi->back->pdev->dev,
3308 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3309 ring->queue_index, pf_q, err);
3313 /* configure Rx buffer alignment */
3314 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3315 clear_ring_build_skb_enabled(ring);
3317 set_ring_build_skb_enabled(ring);
3319 /* cache tail for quicker writes, and clear the reg before use */
3320 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3321 writel(0, ring->tail);
3323 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3329 * i40e_vsi_configure_tx - Configure the VSI for Tx
3330 * @vsi: VSI structure describing this set of rings and resources
3332 * Configure the Tx VSI for operation.
3334 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3339 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3340 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3342 if (!i40e_enabled_xdp_vsi(vsi))
3345 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3346 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3352 * i40e_vsi_configure_rx - Configure the VSI for Rx
3353 * @vsi: the VSI being configured
3355 * Configure the Rx VSI for operation.
3357 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3362 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3363 vsi->max_frame = I40E_MAX_RXBUFFER;
3364 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3365 #if (PAGE_SIZE < 8192)
3366 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3367 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3368 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3369 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3372 vsi->max_frame = I40E_MAX_RXBUFFER;
3373 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3377 /* set up individual rings */
3378 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3379 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3385 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3386 * @vsi: ptr to the VSI
3388 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3390 struct i40e_ring *tx_ring, *rx_ring;
3391 u16 qoffset, qcount;
3394 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3395 /* Reset the TC information */
3396 for (i = 0; i < vsi->num_queue_pairs; i++) {
3397 rx_ring = vsi->rx_rings[i];
3398 tx_ring = vsi->tx_rings[i];
3399 rx_ring->dcb_tc = 0;
3400 tx_ring->dcb_tc = 0;
3405 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3406 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3409 qoffset = vsi->tc_config.tc_info[n].qoffset;
3410 qcount = vsi->tc_config.tc_info[n].qcount;
3411 for (i = qoffset; i < (qoffset + qcount); i++) {
3412 rx_ring = vsi->rx_rings[i];
3413 tx_ring = vsi->tx_rings[i];
3414 rx_ring->dcb_tc = n;
3415 tx_ring->dcb_tc = n;
3421 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3422 * @vsi: ptr to the VSI
3424 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3427 i40e_set_rx_mode(vsi->netdev);
3431 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3432 * @vsi: Pointer to the targeted VSI
3434 * This function replays the hlist on the hw where all the SB Flow Director
3435 * filters were saved.
3437 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3439 struct i40e_fdir_filter *filter;
3440 struct i40e_pf *pf = vsi->back;
3441 struct hlist_node *node;
3443 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3446 /* Reset FDir counters as we're replaying all existing filters */
3447 pf->fd_tcp4_filter_cnt = 0;
3448 pf->fd_udp4_filter_cnt = 0;
3449 pf->fd_sctp4_filter_cnt = 0;
3450 pf->fd_ip4_filter_cnt = 0;
3452 hlist_for_each_entry_safe(filter, node,
3453 &pf->fdir_filter_list, fdir_node) {
3454 i40e_add_del_fdir(vsi, filter, true);
3459 * i40e_vsi_configure - Set up the VSI for action
3460 * @vsi: the VSI being configured
3462 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3466 i40e_set_vsi_rx_mode(vsi);
3467 i40e_restore_vlan(vsi);
3468 i40e_vsi_config_dcb_rings(vsi);
3469 err = i40e_vsi_configure_tx(vsi);
3471 err = i40e_vsi_configure_rx(vsi);
3477 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3478 * @vsi: the VSI being configured
3480 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3482 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3483 struct i40e_pf *pf = vsi->back;
3484 struct i40e_hw *hw = &pf->hw;
3489 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3490 * and PFINT_LNKLSTn registers, e.g.:
3491 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
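* so every register write below uses (vector - 1) as the index; the first
* queue vector, MSI-X vector 1, therefore programs ITR and linked-list
* register 0.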
3493 qp = vsi->base_queue;
3494 vector = vsi->base_vector;
3495 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3496 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3498 q_vector->rx.next_update = jiffies + 1;
3499 q_vector->rx.target_itr =
3500 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3501 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3502 q_vector->rx.target_itr >> 1);
3503 q_vector->rx.current_itr = q_vector->rx.target_itr;
3505 q_vector->tx.next_update = jiffies + 1;
3506 q_vector->tx.target_itr =
3507 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3508 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3509 q_vector->tx.target_itr >> 1);
3510 q_vector->tx.current_itr = q_vector->tx.target_itr;
3512 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3513 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3515 /* Linked list for the queuepairs assigned to this vector */
3516 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3517 for (q = 0; q < q_vector->num_ringpairs; q++) {
3518 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
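/* Descriptive note (added): XDP Tx queues sit right after the regular
 * queue pairs, so with XDP enabled the Rx cause for qp chains to its XDP
 * twin at qp + alloc_queue_pairs, which in turn chains back to the regular
 * Tx queue below.
 */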
3521 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3522 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3523 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3524 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3525 (I40E_QUEUE_TYPE_TX <<
3526 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3528 wr32(hw, I40E_QINT_RQCTL(qp), val);
3531 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3532 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3533 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3534 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3535 (I40E_QUEUE_TYPE_TX <<
3536 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3538 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3541 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3542 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3543 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3544 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3545 (I40E_QUEUE_TYPE_RX <<
3546 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3548 /* Terminate the linked list */
3549 if (q == (q_vector->num_ringpairs - 1))
3550 val |= (I40E_QUEUE_END_OF_LIST <<
3551 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3553 wr32(hw, I40E_QINT_TQCTL(qp), val);
3562 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3563 * @pf: pointer to private device data structure
3565 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3567 struct i40e_hw *hw = &pf->hw;
3570 /* clear things first */
3571 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3572 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3574 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3575 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3576 I40E_PFINT_ICR0_ENA_GRST_MASK |
3577 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3578 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3579 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3580 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3581 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3583 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3584 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3586 if (pf->flags & I40E_FLAG_PTP)
3587 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3589 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3591 /* SW_ITR_IDX = 0, but don't change INTENA */
3592 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3593 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3595 /* OTHER_ITR_IDX = 0 */
3596 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3600 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3601 * @vsi: the VSI being configured
3603 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3605 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3606 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3607 struct i40e_pf *pf = vsi->back;
3608 struct i40e_hw *hw = &pf->hw;
3611 /* set the ITR configuration */
3612 q_vector->rx.next_update = jiffies + 1;
3613 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3614 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3615 q_vector->rx.current_itr = q_vector->rx.target_itr;
3616 q_vector->tx.next_update = jiffies + 1;
3617 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3618 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3619 q_vector->tx.current_itr = q_vector->tx.target_itr;
3621 i40e_enable_misc_int_causes(pf);
3623 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3624 wr32(hw, I40E_PFINT_LNKLST0, 0);
3626 /* Associate the queue pair to the vector and enable the queue int */
3627 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3628 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3629 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3630 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3632 wr32(hw, I40E_QINT_RQCTL(0), val);
3634 if (i40e_enabled_xdp_vsi(vsi)) {
3635 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3636 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3638 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3640 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3643 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3644 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3645 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3647 wr32(hw, I40E_QINT_TQCTL(0), val);
3652 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3653 * @pf: board private structure
3655 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3657 struct i40e_hw *hw = &pf->hw;
3659 wr32(hw, I40E_PFINT_DYN_CTL0,
3660 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3665 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3666 * @pf: board private structure
3668 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3670 struct i40e_hw *hw = &pf->hw;
3673 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3674 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3675 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3677 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3682 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3683 * @irq: interrupt number
3684 * @data: pointer to a q_vector
3686 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3688 struct i40e_q_vector *q_vector = data;
3690 if (!q_vector->tx.ring && !q_vector->rx.ring)
3693 napi_schedule_irqoff(&q_vector->napi);
3699 * i40e_irq_affinity_notify - Callback for affinity changes
3700 * @notify: context as to what irq was changed
3701 * @mask: the new affinity mask
3703 * This is a callback function used by the irq_set_affinity_notifier function
3704 * so that we may register to receive changes to the irq affinity masks.
3706 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3707 const cpumask_t *mask)
3709 struct i40e_q_vector *q_vector =
3710 container_of(notify, struct i40e_q_vector, affinity_notify);
3712 cpumask_copy(&q_vector->affinity_mask, mask);
3716 * i40e_irq_affinity_release - Callback for affinity notifier release
3717 * @ref: internal core kernel usage
3719 * This is a callback function used by the irq_set_affinity_notifier function
3720 * to inform the current notification subscriber that they will no longer
3721 * receive notifications.
3723 static void i40e_irq_affinity_release(struct kref *ref) {}
3726 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3727 * @vsi: the VSI being configured
3728 * @basename: name for the vector
3730 * Allocates MSI-X vectors and requests interrupts from the kernel.
3732 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3734 int q_vectors = vsi->num_q_vectors;
3735 struct i40e_pf *pf = vsi->back;
3736 int base = vsi->base_vector;
3743 for (vector = 0; vector < q_vectors; vector++) {
3744 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3746 irq_num = pf->msix_entries[base + vector].vector;
3748 if (q_vector->tx.ring && q_vector->rx.ring) {
3749 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3750 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3752 } else if (q_vector->rx.ring) {
3753 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3754 "%s-%s-%d", basename, "rx", rx_int_idx++);
3755 } else if (q_vector->tx.ring) {
3756 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3757 "%s-%s-%d", basename, "tx", tx_int_idx++);
3759 /* skip this unused q_vector */
3762 err = request_irq(irq_num,
3768 dev_info(&pf->pdev->dev,
3769 "MSIX request_irq failed, error: %d\n", err);
3770 goto free_queue_irqs;
3773 /* register for affinity change notifications */
3774 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3775 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3776 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3777 /* Spread affinity hints out across online CPUs.
3779 * get_cpu_mask returns a static constant mask with
3780 * a permanent lifetime so it's ok to pass to
3781 * irq_set_affinity_hint without making a copy.
3783 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3784 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
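/* Illustrative example (added): with 4 queue vectors on an 8-CPU system
 * this hints vectors 0..3 at CPUs 0..3; it is only a hint, so tools such
 * as irqbalance may still move the interrupts later.
 */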
3787 vsi->irqs_ready = true;
3793 irq_num = pf->msix_entries[base + vector].vector;
3794 irq_set_affinity_notifier(irq_num, NULL);
3795 irq_set_affinity_hint(irq_num, NULL);
3796 free_irq(irq_num, &vsi->q_vectors[vector]);
3802 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3803 * @vsi: the VSI being un-configured
3805 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3807 struct i40e_pf *pf = vsi->back;
3808 struct i40e_hw *hw = &pf->hw;
3809 int base = vsi->base_vector;
3812 /* disable interrupt causation from each queue */
3813 for (i = 0; i < vsi->num_queue_pairs; i++) {
3816 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3817 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3818 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3820 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3821 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3822 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3824 if (!i40e_enabled_xdp_vsi(vsi))
3826 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3829 /* disable each interrupt */
3830 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3831 for (i = vsi->base_vector;
3832 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3833 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3836 for (i = 0; i < vsi->num_q_vectors; i++)
3837 synchronize_irq(pf->msix_entries[i + base].vector);
3839 /* Legacy and MSI mode - this stops all interrupt handling */
3840 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3841 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3843 synchronize_irq(pf->pdev->irq);
3848 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3849 * @vsi: the VSI being configured
3851 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3853 struct i40e_pf *pf = vsi->back;
3856 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3857 for (i = 0; i < vsi->num_q_vectors; i++)
3858 i40e_irq_dynamic_enable(vsi, i);
3860 i40e_irq_dynamic_enable_icr0(pf);
3863 i40e_flush(&pf->hw);
3868 * i40e_free_misc_vector - Free the vector that handles non-queue events
3869 * @pf: board private structure
3871 static void i40e_free_misc_vector(struct i40e_pf *pf)
3874 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3875 i40e_flush(&pf->hw);
3877 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3878 synchronize_irq(pf->msix_entries[0].vector);
3879 free_irq(pf->msix_entries[0].vector, pf);
3880 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3885 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3886 * @irq: interrupt number
3887 * @data: pointer to a q_vector
3889 * This is the handler used for all MSI/Legacy interrupts, and deals
3890 * with both queue and non-queue interrupts. This is also used in
3891 * MSIX mode to handle the non-queue interrupts.
3893 static irqreturn_t i40e_intr(int irq, void *data)
3895 struct i40e_pf *pf = (struct i40e_pf *)data;
3896 struct i40e_hw *hw = &pf->hw;
3897 irqreturn_t ret = IRQ_NONE;
3898 u32 icr0, icr0_remaining;
3901 icr0 = rd32(hw, I40E_PFINT_ICR0);
3902 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3904 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3905 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3908 /* if interrupt but no bits showing, must be SWINT */
3909 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3910 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3913 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3914 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3915 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3916 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3917 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3920 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3921 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3922 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3923 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3925 /* We do not have a way to disarm Queue causes while leaving
3926 * the interrupt enabled for all other causes; ideally the
3927 * interrupt should be disabled while we are in NAPI, but
3928 * this is not a performance path and napi_schedule()
3929 * can deal with rescheduling.
3931 if (!test_bit(__I40E_DOWN, pf->state))
3932 napi_schedule_irqoff(&q_vector->napi);
3935 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3936 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3937 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3938 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3941 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3942 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3943 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3946 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3947 /* disable any further VFLR event notifications */
3948 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
3949 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3951 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
3952 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3954 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3955 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3959 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3960 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3961 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3962 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3963 val = rd32(hw, I40E_GLGEN_RSTAT);
3964 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3965 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3966 if (val == I40E_RESET_CORER) {
3968 } else if (val == I40E_RESET_GLOBR) {
3970 } else if (val == I40E_RESET_EMPR) {
3972 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3976 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3977 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3978 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3979 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3980 rd32(hw, I40E_PFHMC_ERRORINFO),
3981 rd32(hw, I40E_PFHMC_ERRORDATA));
3984 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3985 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3987 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3988 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3989 i40e_ptp_tx_hwtstamp(pf);
3993 /* If a critical error is pending we have no choice but to reset the device.
3995 * Report and mask out any remaining unexpected interrupts.
3997 icr0_remaining = icr0 & ena_mask;
3998 if (icr0_remaining) {
3999 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
4001 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
4002 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
4003 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
4004 dev_info(&pf->pdev->dev, "device will be reset\n");
4005 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
4006 i40e_service_event_schedule(pf);
4008 ena_mask &= ~icr0_remaining;
4013 /* re-enable interrupt causes */
4014 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4015 if (!test_bit(__I40E_DOWN, pf->state)) {
4016 i40e_service_event_schedule(pf);
4017 i40e_irq_dynamic_enable_icr0(pf);
4024 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4025 * @tx_ring: tx ring to clean
4026 * @budget: how many cleans we're allowed
4028 * Returns true if there's any budget left (i.e. the clean is finished)
4030 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4032 struct i40e_vsi *vsi = tx_ring->vsi;
4033 u16 i = tx_ring->next_to_clean;
4034 struct i40e_tx_buffer *tx_buf;
4035 struct i40e_tx_desc *tx_desc;
4037 tx_buf = &tx_ring->tx_bi[i];
4038 tx_desc = I40E_TX_DESC(tx_ring, i);
4039 i -= tx_ring->count;
4042 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4044 /* if next_to_watch is not set then there is no work pending */
4048 /* prevent any other reads prior to eop_desc */
4051 /* if the descriptor isn't done, no work yet to do */
4052 if (!(eop_desc->cmd_type_offset_bsz &
4053 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4056 /* clear next_to_watch to prevent false hangs */
4057 tx_buf->next_to_watch = NULL;
4059 tx_desc->buffer_addr = 0;
4060 tx_desc->cmd_type_offset_bsz = 0;
4061 /* move past filter desc */
4066 i -= tx_ring->count;
4067 tx_buf = tx_ring->tx_bi;
4068 tx_desc = I40E_TX_DESC(tx_ring, 0);
4070 /* unmap skb header data */
4071 dma_unmap_single(tx_ring->dev,
4072 dma_unmap_addr(tx_buf, dma),
4073 dma_unmap_len(tx_buf, len),
4075 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4076 kfree(tx_buf->raw_buf);
4078 tx_buf->raw_buf = NULL;
4079 tx_buf->tx_flags = 0;
4080 tx_buf->next_to_watch = NULL;
4081 dma_unmap_len_set(tx_buf, len, 0);
4082 tx_desc->buffer_addr = 0;
4083 tx_desc->cmd_type_offset_bsz = 0;
4085 /* move us past the eop_desc for start of next FD desc */
4090 i -= tx_ring->count;
4091 tx_buf = tx_ring->tx_bi;
4092 tx_desc = I40E_TX_DESC(tx_ring, 0);
4095 /* update budget accounting */
4097 } while (likely(budget));
4099 i += tx_ring->count;
4100 tx_ring->next_to_clean = i;
4102 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4103 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
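/* Illustrative sketch, not part of the driver: the clean loop above keeps the
 * ring index biased by -count so a wrap can be detected with a cheap "!i"
 * test instead of comparing against tx_ring->count on every step.  A minimal
 * standalone version of the same trick (example_ring_advance() is a
 * hypothetical name used only for illustration):
 */
static inline u16 example_ring_advance(u16 slot, u16 count)
{
	s32 i = (s32)slot - count;	/* bias the index below zero */

	i++;				/* step to the next descriptor */
	if (!i)				/* zero means we ran off the end */
		i -= count;		/* wrap back to slot 0, still biased */

	return (u16)(i + count);	/* remove the bias to get the slot */
}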
4109 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4110 * @irq: interrupt number
4111 * @data: pointer to a q_vector
4113 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4115 struct i40e_q_vector *q_vector = data;
4116 struct i40e_vsi *vsi;
4118 if (!q_vector->tx.ring)
4121 vsi = q_vector->tx.ring->vsi;
4122 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4128 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4129 * @vsi: the VSI being configured
4130 * @v_idx: vector index
4131 * @qp_idx: queue pair index
4133 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4135 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4136 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4137 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4139 tx_ring->q_vector = q_vector;
4140 tx_ring->next = q_vector->tx.ring;
4141 q_vector->tx.ring = tx_ring;
4142 q_vector->tx.count++;
4144 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4145 if (i40e_enabled_xdp_vsi(vsi)) {
4146 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4148 xdp_ring->q_vector = q_vector;
4149 xdp_ring->next = q_vector->tx.ring;
4150 q_vector->tx.ring = xdp_ring;
4151 q_vector->tx.count++;
4154 rx_ring->q_vector = q_vector;
4155 rx_ring->next = q_vector->rx.ring;
4156 q_vector->rx.ring = rx_ring;
4157 q_vector->rx.count++;
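/* Illustrative sketch, not part of the driver: rings are pushed head-first
 * onto the q_vector's tx/rx lists through ring->next above, so the most
 * recently mapped ring is visited first.  The driver's i40e_for_each_ring()
 * macro walks that list roughly like the hypothetical helper below
 * (count_rings() is an assumed name, shown only for illustration):
 */
static inline int count_rings(struct i40e_ring *head)
{
	struct i40e_ring *ring;
	int n = 0;

	for (ring = head; ring; ring = ring->next)
		n++;

	return n;
}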
4161 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4162 * @vsi: the VSI being configured
4164 * This function maps descriptor rings to the queue-specific vectors
4165 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4166 * one vector per queue pair, but on a constrained vector budget, we
4167 * group the queue pairs as "efficiently" as possible.
4169 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4171 int qp_remaining = vsi->num_queue_pairs;
4172 int q_vectors = vsi->num_q_vectors;
4177 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4178 * group them so there are multiple queues per vector.
4179 * It is also important to go through all the available vectors so that,
4180 * if we don't use them all, the remaining vectors are cleared. This is
4181 * especially important when decreasing the
4182 * number of queues in use.
4184 for (; v_start < q_vectors; v_start++) {
4185 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4187 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4189 q_vector->num_ringpairs = num_ringpairs;
4190 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4192 q_vector->rx.count = 0;
4193 q_vector->tx.count = 0;
4194 q_vector->rx.ring = NULL;
4195 q_vector->tx.ring = NULL;
4197 while (num_ringpairs--) {
4198 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
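/* Worked example, not part of the driver: with 10 queue pairs and 4 vectors
 * the DIV_ROUND_UP() above hands out 3, 3, 2, 2 ring pairs, spreading the
 * remainder over the first vectors instead of dumping it on the last one.
 * A minimal standalone version of the same distribution (example_spread_qps()
 * is a hypothetical name used only for illustration):
 */
static inline void example_spread_qps(int num_qps, int num_vectors, int *out)
{
	int v, remaining = num_qps;

	for (v = 0; v < num_vectors; v++) {
		out[v] = DIV_ROUND_UP(remaining, num_vectors - v);
		remaining -= out[v];
	}
}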
4206 * i40e_vsi_request_irq - Request IRQ from the OS
4207 * @vsi: the VSI being configured
4208 * @basename: name for the vector
4210 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4212 struct i40e_pf *pf = vsi->back;
4215 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4216 err = i40e_vsi_request_irq_msix(vsi, basename);
4217 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4218 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4221 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4225 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4230 #ifdef CONFIG_NET_POLL_CONTROLLER
4232 * i40e_netpoll - A Polling 'interrupt' handler
4233 * @netdev: network interface device structure
4235 * This is used by netconsole to send skbs without having to re-enable
4236 * interrupts. It's not called while the normal interrupt routine is executing.
4238 static void i40e_netpoll(struct net_device *netdev)
4240 struct i40e_netdev_priv *np = netdev_priv(netdev);
4241 struct i40e_vsi *vsi = np->vsi;
4242 struct i40e_pf *pf = vsi->back;
4245 /* if interface is down do nothing */
4246 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4249 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4250 for (i = 0; i < vsi->num_q_vectors; i++)
4251 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4253 i40e_intr(pf->pdev->irq, netdev);
4258 #define I40E_QTX_ENA_WAIT_COUNT 50
4261 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4262 * @pf: the PF being configured
4263 * @pf_q: the PF queue
4264 * @enable: enable or disable state of the queue
4266 * This routine will wait for the given Tx queue of the PF to reach the
4267 * enabled or disabled state.
4268 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
4269 * multiple retries; otherwise returns 0 on success.
4271 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4276 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4277 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4278 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4281 usleep_range(10, 20);
4283 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4290 * i40e_control_tx_q - Start or stop a particular Tx queue
4291 * @pf: the PF structure
4292 * @pf_q: the PF queue to configure
4293 * @enable: start or stop the queue
4295 * This function enables or disables a single queue. Note that any delay
4296 * required after the operation is expected to be handled by the caller of this function.
4299 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4301 struct i40e_hw *hw = &pf->hw;
4305 /* warn the TX unit of coming changes */
4306 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4308 usleep_range(10, 20);
4310 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4311 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4312 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4313 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4315 usleep_range(1000, 2000);
4318 /* Skip if the queue is already in the requested state */
4319 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4322 /* turn on/off the queue */
4324 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4325 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4327 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4330 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4334 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4336 * @pf: the PF structure
4337 * @pf_q: the PF queue to configure
4338 * @is_xdp: true if the queue is used for XDP
4339 * @enable: start or stop the queue
4341 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4342 bool is_xdp, bool enable)
4346 i40e_control_tx_q(pf, pf_q, enable);
4348 /* wait for the change to finish */
4349 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4351 dev_info(&pf->pdev->dev,
4352 "VSI seid %d %sTx ring %d %sable timeout\n",
4353 seid, (is_xdp ? "XDP " : ""), pf_q,
4354 (enable ? "en" : "dis"));
4361 * i40e_vsi_enable_tx - Start a VSI's rings
4362 * @vsi: the VSI being configured
4364 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4366 struct i40e_pf *pf = vsi->back;
4367 int i, pf_q, ret = 0;
4369 pf_q = vsi->base_queue;
4370 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4371 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4373 false /*is xdp*/, true);
4377 if (!i40e_enabled_xdp_vsi(vsi))
4380 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4381 pf_q + vsi->alloc_queue_pairs,
4382 true /*is xdp*/, true);
4390 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4391 * @pf: the PF being configured
4392 * @pf_q: the PF queue
4393 * @enable: enable or disable state of the queue
4395 * This routine will wait for the given Rx queue of the PF to reach the
4396 * enabled or disabled state.
4397 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
4398 * multiple retries; otherwise returns 0 on success.
4400 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4405 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4406 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4407 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4410 usleep_range(10, 20);
4412 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4419 * i40e_control_rx_q - Start or stop a particular Rx queue
4420 * @pf: the PF structure
4421 * @pf_q: the PF queue to configure
4422 * @enable: start or stop the queue
4424 * This function enables or disables a single queue. Note that
4425 * any delay required after the operation is expected to be
4426 * handled by the caller of this function.
4428 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4430 struct i40e_hw *hw = &pf->hw;
4434 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4435 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4436 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4437 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4439 usleep_range(1000, 2000);
4442 /* Skip if the queue is already in the requested state */
4443 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4446 /* turn on/off the queue */
4448 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4450 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4452 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4456 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4457 * @pf: the PF structure
4458 * @pf_q: queue being configured
4459 * @enable: start or stop the rings
4461 * This function enables or disables a single queue along with waiting
4462 * for the change to finish. The caller of this function should handle
4463 * the delays needed in the case of disabling queues.
4465 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4469 i40e_control_rx_q(pf, pf_q, enable);
4471 /* wait for the change to finish */
4472 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4480 * i40e_vsi_enable_rx - Start a VSI's rings
4481 * @vsi: the VSI being configured
4483 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4485 struct i40e_pf *pf = vsi->back;
4486 int i, pf_q, ret = 0;
4488 pf_q = vsi->base_queue;
4489 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4490 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4492 dev_info(&pf->pdev->dev,
4493 "VSI seid %d Rx ring %d enable timeout\n",
4503 * i40e_vsi_start_rings - Start a VSI's rings
4504 * @vsi: the VSI being configured
4506 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4510 /* do rx first for enable and last for disable */
4511 ret = i40e_vsi_enable_rx(vsi);
4514 ret = i40e_vsi_enable_tx(vsi);
4519 #define I40E_DISABLE_TX_GAP_MSEC 50
4522 * i40e_vsi_stop_rings - Stop a VSI's rings
4523 * @vsi: the VSI being configured
4525 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4527 struct i40e_pf *pf = vsi->back;
4528 int pf_q, err, q_end;
4530 /* When port TX is suspended, don't wait */
4531 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4532 return i40e_vsi_stop_rings_no_wait(vsi);
4534 q_end = vsi->base_queue + vsi->num_queue_pairs;
4535 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4536 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4538 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4539 err = i40e_control_wait_rx_q(pf, pf_q, false);
4541 dev_info(&pf->pdev->dev,
4542 "VSI seid %d Rx ring %d dissable timeout\n",
4546 msleep(I40E_DISABLE_TX_GAP_MSEC);
4547 pf_q = vsi->base_queue;
4548 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4549 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4551 i40e_vsi_wait_queues_disabled(vsi);
4555 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4556 * @vsi: the VSI being shutdown
4558 * This function stops all the rings for a VSI but does not delay to verify
4559 * that rings have been disabled. It is expected that the caller is shutting
4560 * down multiple VSIs at once and will delay together for all the VSIs after
4561 * initiating the shutdown. This is particularly useful for shutting down lots
4562 * of VFs together. Otherwise, a large delay can be incurred while configuring
4563 * each VSI in serial.
4565 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4567 struct i40e_pf *pf = vsi->back;
4570 pf_q = vsi->base_queue;
4571 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4572 i40e_control_tx_q(pf, pf_q, false);
4573 i40e_control_rx_q(pf, pf_q, false);
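/* Illustrative sketch, not part of the driver: the no-wait variant is meant
 * to let a caller batch the shutdown of many VSIs and absorb the settling
 * delay once, roughly like the hypothetical helper below (example_stop_many()
 * is an assumed name; real callers add their own wait after this):
 */
static inline void example_stop_many(struct i40e_pf *pf)
{
	int v;

	/* initiate the stop on every allocated VSI without waiting */
	for (v = 0; v < pf->num_alloc_vsi; v++)
		if (pf->vsi[v])
			i40e_vsi_stop_rings_no_wait(pf->vsi[v]);

	/* a single delay/wait covering all the VSIs would follow here */
}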
4578 * i40e_vsi_free_irq - Free the irq association with the OS
4579 * @vsi: the VSI being configured
4581 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4583 struct i40e_pf *pf = vsi->back;
4584 struct i40e_hw *hw = &pf->hw;
4585 int base = vsi->base_vector;
4589 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4590 if (!vsi->q_vectors)
4593 if (!vsi->irqs_ready)
4596 vsi->irqs_ready = false;
4597 for (i = 0; i < vsi->num_q_vectors; i++) {
4602 irq_num = pf->msix_entries[vector].vector;
4604 /* free only the irqs that were actually requested */
4605 if (!vsi->q_vectors[i] ||
4606 !vsi->q_vectors[i]->num_ringpairs)
4609 /* clear the affinity notifier in the IRQ descriptor */
4610 irq_set_affinity_notifier(irq_num, NULL);
4611 /* remove our suggested affinity mask for this IRQ */
4612 irq_set_affinity_hint(irq_num, NULL);
4613 synchronize_irq(irq_num);
4614 free_irq(irq_num, vsi->q_vectors[i]);
4616 /* Tear down the interrupt queue link list
4618 * We know that they come in pairs and always
4619 * the Rx first, then the Tx. To clear the
4620 * link list, stick the EOL value into the
4621 * next_q field of the registers.
4623 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4624 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4625 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4626 val |= I40E_QUEUE_END_OF_LIST
4627 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4628 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4630 while (qp != I40E_QUEUE_END_OF_LIST) {
4633 val = rd32(hw, I40E_QINT_RQCTL(qp));
4635 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4636 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4637 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4638 I40E_QINT_RQCTL_INTEVENT_MASK);
4640 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4641 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4643 wr32(hw, I40E_QINT_RQCTL(qp), val);
4645 val = rd32(hw, I40E_QINT_TQCTL(qp));
4647 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4648 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4650 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4651 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4652 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4653 I40E_QINT_TQCTL_INTEVENT_MASK);
4655 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4656 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4658 wr32(hw, I40E_QINT_TQCTL(qp), val);
4663 free_irq(pf->pdev->irq, pf);
4665 val = rd32(hw, I40E_PFINT_LNKLST0);
4666 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4667 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4668 val |= I40E_QUEUE_END_OF_LIST
4669 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4670 wr32(hw, I40E_PFINT_LNKLST0, val);
4672 val = rd32(hw, I40E_QINT_RQCTL(qp));
4673 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4674 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4675 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4676 I40E_QINT_RQCTL_INTEVENT_MASK);
4678 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4679 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4681 wr32(hw, I40E_QINT_RQCTL(qp), val);
4683 val = rd32(hw, I40E_QINT_TQCTL(qp));
4685 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4686 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4687 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4688 I40E_QINT_TQCTL_INTEVENT_MASK);
4690 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4691 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4693 wr32(hw, I40E_QINT_TQCTL(qp), val);
4698 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4699 * @vsi: the VSI being configured
4700 * @v_idx: Index of vector to be freed
4702 * This function frees the memory allocated to the q_vector. In addition if
4703 * NAPI is enabled it will delete any references to the NAPI struct prior
4704 * to freeing the q_vector.
4706 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4708 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4709 struct i40e_ring *ring;
4714 /* disassociate q_vector from rings */
4715 i40e_for_each_ring(ring, q_vector->tx)
4716 ring->q_vector = NULL;
4718 i40e_for_each_ring(ring, q_vector->rx)
4719 ring->q_vector = NULL;
4721 /* only VSI w/ an associated netdev is set up w/ NAPI */
4723 netif_napi_del(&q_vector->napi);
4725 vsi->q_vectors[v_idx] = NULL;
4727 kfree_rcu(q_vector, rcu);
4731 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4732 * @vsi: the VSI being un-configured
4734 * This frees the memory allocated to the q_vectors and
4735 * deletes references to the NAPI struct.
4737 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4741 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4742 i40e_free_q_vector(vsi, v_idx);
4746 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4747 * @pf: board private structure
4749 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4751 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4752 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4753 pci_disable_msix(pf->pdev);
4754 kfree(pf->msix_entries);
4755 pf->msix_entries = NULL;
4756 kfree(pf->irq_pile);
4757 pf->irq_pile = NULL;
4758 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4759 pci_disable_msi(pf->pdev);
4761 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4765 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4766 * @pf: board private structure
4768 * We go through and clear interrupt specific resources and reset the structure
4769 * to pre-load conditions
4771 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4775 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4776 i40e_free_misc_vector(pf);
4778 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4779 I40E_IWARP_IRQ_PILE_ID);
4781 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4782 for (i = 0; i < pf->num_alloc_vsi; i++)
4784 i40e_vsi_free_q_vectors(pf->vsi[i]);
4785 i40e_reset_interrupt_capability(pf);
4789 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4790 * @vsi: the VSI being configured
4792 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4799 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4800 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4802 if (q_vector->rx.ring || q_vector->tx.ring)
4803 napi_enable(&q_vector->napi);
4808 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4809 * @vsi: the VSI being configured
4811 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4818 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4819 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4821 if (q_vector->rx.ring || q_vector->tx.ring)
4822 napi_disable(&q_vector->napi);
4827 * i40e_vsi_close - Shut down a VSI
4828 * @vsi: the vsi to be quelled
4830 static void i40e_vsi_close(struct i40e_vsi *vsi)
4832 struct i40e_pf *pf = vsi->back;
4833 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4835 i40e_vsi_free_irq(vsi);
4836 i40e_vsi_free_tx_resources(vsi);
4837 i40e_vsi_free_rx_resources(vsi);
4838 vsi->current_netdev_flags = 0;
4839 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4840 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4841 set_bit(__I40E_CLIENT_RESET, pf->state);
4845 * i40e_quiesce_vsi - Pause a given VSI
4846 * @vsi: the VSI being paused
4848 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4850 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4853 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4854 if (vsi->netdev && netif_running(vsi->netdev))
4855 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4857 i40e_vsi_close(vsi);
4861 * i40e_unquiesce_vsi - Resume a given VSI
4862 * @vsi: the VSI being resumed
4864 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4866 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4869 if (vsi->netdev && netif_running(vsi->netdev))
4870 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4872 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4876 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4879 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4883 for (v = 0; v < pf->num_alloc_vsi; v++) {
4885 i40e_quiesce_vsi(pf->vsi[v]);
4890 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4893 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4897 for (v = 0; v < pf->num_alloc_vsi; v++) {
4899 i40e_unquiesce_vsi(pf->vsi[v]);
4904 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4905 * @vsi: the VSI being configured
4907 * Wait until all queues on a given VSI have been disabled.
4909 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4911 struct i40e_pf *pf = vsi->back;
4914 pf_q = vsi->base_queue;
4915 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4916 /* Check and wait for the Tx queue */
4917 ret = i40e_pf_txq_wait(pf, pf_q, false);
4919 dev_info(&pf->pdev->dev,
4920 "VSI seid %d Tx ring %d disable timeout\n",
4925 if (!i40e_enabled_xdp_vsi(vsi))
4928 /* Check and wait for the XDP Tx queue */
4929 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4932 dev_info(&pf->pdev->dev,
4933 "VSI seid %d XDP Tx ring %d disable timeout\n",
4938 /* Check and wait for the Rx queue */
4939 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4941 dev_info(&pf->pdev->dev,
4942 "VSI seid %d Rx ring %d disable timeout\n",
4951 #ifdef CONFIG_I40E_DCB
4953 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4956 * This function waits for the queues to be in disabled state for all the
4957 * VSIs that are managed by this PF.
4959 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4963 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4965 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4977 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4978 * @pf: pointer to PF
4980 * Get the TC map for an iSCSI-enabled PF; it will include the iSCSI TC
4983 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4985 struct i40e_dcb_app_priority_table app;
4986 struct i40e_hw *hw = &pf->hw;
4987 u8 enabled_tc = 1; /* TC0 is always enabled */
4989 /* Get the iSCSI APP TLV */
4990 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4992 for (i = 0; i < dcbcfg->numapps; i++) {
4993 app = dcbcfg->app[i];
4994 if (app.selector == I40E_APP_SEL_TCPIP &&
4995 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4996 tc = dcbcfg->etscfg.prioritytable[app.priority];
4997 enabled_tc |= BIT(tc);
5006 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5007 * @dcbcfg: the corresponding DCBx configuration structure
5009 * Return the number of TCs from given DCBx configuration
5011 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
5013 int i, tc_unused = 0;
5017 /* Scan the ETS Config Priority Table to find
5018 * traffic class enabled for a given priority
5019 * and create a bitmask of enabled TCs
5021 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5022 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5024 /* Now scan the bitmask to check for
5025 * contiguous TCs starting with TC0
5027 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5028 if (num_tc & BIT(i)) {
5032 pr_err("Non-contiguous TC - Disabling DCB\n");
5040 /* There is always at least TC0 */
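/* Worked example, not part of the driver: a priority table of
 * { 0, 0, 1, 1, 2, 0, 0, 0 } enables TCs 0, 1 and 2, so the bitmask built
 * above is 0b0111 and the contiguous scan counts 3 TCs; a table such as
 * { 0, 0, 3, 0, ... } would produce the non-contiguous mask 0b1001 and take
 * the "Non-contiguous TC" error path instead.  A hypothetical standalone
 * count of the contiguous prefix:
 */
static inline u8 example_count_contiguous_tcs(const u8 *prio_table)
{
	u8 mask = 0, num = 0;
	int i;

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		mask |= BIT(prio_table[i]);

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (!(mask & BIT(i)))
			break;			/* stop at the first gap */
		num++;
	}

	return num ? num : 1;			/* TC0 is always implied */
}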
5048 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5049 * @dcbcfg: the corresponding DCBx configuration structure
5051 * Query the current DCB configuration and return a bitmap of the
5052 * traffic classes enabled by the given DCBx config
5054 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5056 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5060 for (i = 0; i < num_tc; i++)
5061 enabled_tc |= BIT(i);
5067 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5068 * @pf: PF being queried
5070 * Query the current MQPRIO configuration and return a bitmap of the
5071 * traffic classes enabled.
5073 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5075 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5076 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5077 u8 enabled_tc = 1, i;
5079 for (i = 1; i < num_tc; i++)
5080 enabled_tc |= BIT(i);
5085 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5086 * @pf: PF being queried
5088 * Return number of traffic classes enabled for the given PF
5090 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5092 struct i40e_hw *hw = &pf->hw;
5093 u8 i, enabled_tc = 1;
5095 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5097 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5098 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5100 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5101 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5104 /* SFP mode will be enabled for all TCs on port */
5105 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5106 return i40e_dcb_get_num_tc(dcbcfg);
5108 /* MFP mode return count of enabled TCs for this PF */
5109 if (pf->hw.func_caps.iscsi)
5110 enabled_tc = i40e_get_iscsi_tc_map(pf);
5112 return 1; /* Only TC0 */
5114 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5115 if (enabled_tc & BIT(i))
5122 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5123 * @pf: PF being queried
5125 * Return a bitmap for enabled traffic classes for this PF.
5127 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5129 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5130 return i40e_mqprio_get_enabled_tc(pf);
5132 /* If neither MQPRIO nor DCB is enabled for this PF then just return the default TC */
5135 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5136 return I40E_DEFAULT_TRAFFIC_CLASS;
5138 /* SFP mode we want PF to be enabled for all TCs */
5139 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5140 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5142 /* MFP enabled and iSCSI PF type */
5143 if (pf->hw.func_caps.iscsi)
5144 return i40e_get_iscsi_tc_map(pf);
5146 return I40E_DEFAULT_TRAFFIC_CLASS;
5150 * i40e_vsi_get_bw_info - Query VSI BW Information
5151 * @vsi: the VSI being queried
5153 * Returns 0 on success, negative value on failure
5155 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5157 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5158 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5159 struct i40e_pf *pf = vsi->back;
5160 struct i40e_hw *hw = &pf->hw;
5165 /* Get the VSI level BW configuration */
5166 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5168 dev_info(&pf->pdev->dev,
5169 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5170 i40e_stat_str(&pf->hw, ret),
5171 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5175 /* Get the VSI level BW configuration per TC */
5176 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5179 dev_info(&pf->pdev->dev,
5180 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5181 i40e_stat_str(&pf->hw, ret),
5182 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5186 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5187 dev_info(&pf->pdev->dev,
5188 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5189 bw_config.tc_valid_bits,
5190 bw_ets_config.tc_valid_bits);
5191 /* Still continuing */
5194 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5195 vsi->bw_max_quanta = bw_config.max_bw;
5196 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5197 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5198 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5199 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5200 vsi->bw_ets_limit_credits[i] =
5201 le16_to_cpu(bw_ets_config.credits[i]);
5202 /* 3 bits out of 4 for each TC */
5203 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
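/* Worked example, not part of the driver: tc_bw_max packs one 4-bit field per
 * TC, of which only the low 3 bits are used, so TC i lives at bits
 * [i*4 + 2 : i*4].  For tc_bw_max = 0x00325143 the extracted per-TC max
 * quanta are 3, 4, 1, 5, 2, 3, 0, 0 (TC0 first).  A hypothetical standalone
 * accessor:
 */
static inline u8 example_tc_max_quanta(u32 tc_bw_max, int tc)
{
	return (u8)((tc_bw_max >> (tc * 4)) & 0x7);	/* low 3 bits of each nibble */
}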
5210 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5211 * @vsi: the VSI being configured
5212 * @enabled_tc: TC bitmap
5213 * @bw_share: BW shared credits per TC
5215 * Returns 0 on success, negative value on failure
5217 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5220 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5221 struct i40e_pf *pf = vsi->back;
5225 /* There is no need to reset BW when mqprio mode is on. */
5226 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5228 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5229 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5231 dev_info(&pf->pdev->dev,
5232 "Failed to reset tx rate for vsi->seid %u\n",
5236 bw_data.tc_valid_bits = enabled_tc;
5237 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5238 bw_data.tc_bw_credits[i] = bw_share[i];
5240 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5242 dev_info(&pf->pdev->dev,
5243 "AQ command Config VSI BW allocation per TC failed = %d\n",
5244 pf->hw.aq.asq_last_status);
5248 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5249 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5255 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5256 * @vsi: the VSI being configured
5257 * @enabled_tc: TC map to be enabled
5260 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5262 struct net_device *netdev = vsi->netdev;
5263 struct i40e_pf *pf = vsi->back;
5264 struct i40e_hw *hw = &pf->hw;
5267 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5273 netdev_reset_tc(netdev);
5277 /* Set up actual enabled TCs on the VSI */
5278 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5281 /* set per TC queues for the VSI */
5282 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5283 /* Only set TC queues for enabled tcs
5285 * e.g. For a VSI that has TC0 and TC3 enabled the
5286 * enabled_tc bitmap would be 0x00001001; the driver
5287 * will set the numtc for netdev as 2 that will be
5288 * referenced by the netdev layer as TC 0 and 1.
5290 if (vsi->tc_config.enabled_tc & BIT(i))
5291 netdev_set_tc_queue(netdev,
5292 vsi->tc_config.tc_info[i].netdev_tc,
5293 vsi->tc_config.tc_info[i].qcount,
5294 vsi->tc_config.tc_info[i].qoffset);
5297 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5300 /* Assign UP2TC map for the VSI */
5301 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5302 /* Get the actual TC# for the UP */
5303 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5304 /* Get the mapped netdev TC# for the UP */
5305 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5306 netdev_set_prio_tc_map(netdev, i, netdev_tc);
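/* Worked example, not part of the driver: with TC0 and TC3 enabled, numtc is
 * reported to the stack as 2; hardware TC0 becomes netdev TC 0 and hardware
 * TC3 becomes netdev TC 1.  A user priority whose ETS table entry points at
 * hardware TC3 is therefore mapped to netdev TC 1 by the
 * netdev_set_prio_tc_map() call above.
 */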
5311 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5312 * @vsi: the VSI being configured
5313 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5315 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5316 struct i40e_vsi_context *ctxt)
5318 /* copy just the sections touched not the entire info
5319 * since not all sections are valid as returned by the AQ VSI update param command */
5322 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5323 memcpy(&vsi->info.queue_mapping,
5324 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5325 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5326 sizeof(vsi->info.tc_mapping));
5330 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5331 * @vsi: VSI to be configured
5332 * @enabled_tc: TC bitmap
5334 * This configures a particular VSI for TCs that are mapped to the
5335 * given TC bitmap. It uses default bandwidth share for TCs across
5336 * VSIs to configure TC for a particular VSI.
5339 * It is expected that the VSI queues have been quiesced before calling this function.
5342 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5344 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5345 struct i40e_pf *pf = vsi->back;
5346 struct i40e_hw *hw = &pf->hw;
5347 struct i40e_vsi_context ctxt;
5351 /* Check if enabled_tc is same as existing or new TCs */
5352 if (vsi->tc_config.enabled_tc == enabled_tc &&
5353 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5356 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5357 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5358 if (enabled_tc & BIT(i))
5362 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5364 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5366 dev_info(&pf->pdev->dev,
5367 "Failed configuring TC map %d for VSI %d\n",
5368 enabled_tc, vsi->seid);
5369 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5372 dev_info(&pf->pdev->dev,
5373 "Failed querying vsi bw info, err %s aq_err %s\n",
5374 i40e_stat_str(hw, ret),
5375 i40e_aq_str(hw, hw->aq.asq_last_status));
5378 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5379 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5382 valid_tc = bw_config.tc_valid_bits;
5383 /* Always enable TC0, no matter what */
5385 dev_info(&pf->pdev->dev,
5386 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5387 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5388 enabled_tc = valid_tc;
5391 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5393 dev_err(&pf->pdev->dev,
5394 "Unable to configure TC map %d for VSI %d\n",
5395 enabled_tc, vsi->seid);
5400 /* Update Queue Pairs Mapping for currently enabled UPs */
5401 ctxt.seid = vsi->seid;
5402 ctxt.pf_num = vsi->back->hw.pf_id;
5404 ctxt.uplink_seid = vsi->uplink_seid;
5405 ctxt.info = vsi->info;
5406 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5407 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5411 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5414 /* On destroying the qdisc, reset vsi->rss_size, as the number of enabled queues changed */
5417 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5418 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5419 vsi->num_queue_pairs);
5420 ret = i40e_vsi_config_rss(vsi);
5422 dev_info(&vsi->back->pdev->dev,
5423 "Failed to reconfig rss for num_queues\n");
5426 vsi->reconfig_rss = false;
5428 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5429 ctxt.info.valid_sections |=
5430 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5431 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5434 /* Update the VSI after updating the VSI queue-mapping information */
5437 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5439 dev_info(&pf->pdev->dev,
5440 "Update vsi tc config failed, err %s aq_err %s\n",
5441 i40e_stat_str(hw, ret),
5442 i40e_aq_str(hw, hw->aq.asq_last_status));
5445 /* update the local VSI info with updated queue map */
5446 i40e_vsi_update_queue_map(vsi, &ctxt);
5447 vsi->info.valid_sections = 0;
5449 /* Update current VSI BW information */
5450 ret = i40e_vsi_get_bw_info(vsi);
5452 dev_info(&pf->pdev->dev,
5453 "Failed updating vsi bw info, err %s aq_err %s\n",
5454 i40e_stat_str(hw, ret),
5455 i40e_aq_str(hw, hw->aq.asq_last_status));
5459 /* Update the netdev TC setup */
5460 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5466 * i40e_get_link_speed - Returns link speed for the interface
5467 * @vsi: VSI to be configured
5470 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5472 struct i40e_pf *pf = vsi->back;
5474 switch (pf->hw.phy.link_info.link_speed) {
5475 case I40E_LINK_SPEED_40GB:
5477 case I40E_LINK_SPEED_25GB:
5479 case I40E_LINK_SPEED_20GB:
5481 case I40E_LINK_SPEED_10GB:
5483 case I40E_LINK_SPEED_1GB:
5491 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
5492 * @vsi: Pointer to vsi structure
5493 * @max_tx_rate: max TX rate in bytes to be converted into Mbits
5495 * Helper function to convert units before sending them to set the BW limit
5497 static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
5499 if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
5500 dev_warn(&vsi->back->pdev->dev,
5501 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5502 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5504 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
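/* Illustrative sketch, not part of the driver: do_div() divides a u64 in
 * place and returns the remainder, so after the call above max_tx_rate holds
 * the quotient.  The tc interface supplies the rate in bytes/s, and since
 * 1 Mbit/s = 125000 bytes/s (which is what the divisor is assumed to
 * express), a hypothetical standalone conversion looks like:
 */
static inline u64 example_bytes_to_mbits(u64 rate_bytes_per_sec)
{
	/* e.g. 62500000 bytes/s in -> 500 Mbit/s out */
	do_div(rate_bytes_per_sec, 125000);
	return rate_bytes_per_sec;
}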
5511 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5512 * @vsi: VSI to be configured
5513 * @seid: seid of the channel/VSI
5514 * @max_tx_rate: max TX rate to be configured as BW limit
5516 * Helper function to set BW limit for a given VSI
5518 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5520 struct i40e_pf *pf = vsi->back;
5525 speed = i40e_get_link_speed(vsi);
5526 if (max_tx_rate > speed) {
5527 dev_err(&pf->pdev->dev,
5528 "Invalid max tx rate %llu specified for VSI seid %d.",
5532 if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
5533 dev_warn(&pf->pdev->dev,
5534 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5535 max_tx_rate = I40E_BW_CREDIT_DIVISOR;
5538 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5539 credits = max_tx_rate;
5540 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5541 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5542 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5544 dev_err(&pf->pdev->dev,
5545 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5546 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5547 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
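/* Worked example, not part of the driver: the firmware programs the limit in
 * credits of 50 Mbps (per the comment above), so a requested max_tx_rate of
 * 1000 Mbps becomes 1000 / I40E_BW_CREDIT_DIVISOR = 20 credits, and anything
 * between 1 and 49 Mbps is first rounded up to the one-credit minimum by the
 * warning path above.
 */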
5552 * i40e_remove_queue_channels - Remove queue channels for the TCs
5553 * @vsi: VSI to be configured
5555 * Remove queue channels for the TCs
5557 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5559 enum i40e_admin_queue_err last_aq_status;
5560 struct i40e_cloud_filter *cfilter;
5561 struct i40e_channel *ch, *ch_tmp;
5562 struct i40e_pf *pf = vsi->back;
5563 struct hlist_node *node;
5566 /* Reset rss size that was stored when reconfiguring rss for
5567 * channel VSIs with non-power-of-2 queue count.
5569 vsi->current_rss_size = 0;
5571 /* perform cleanup for channels if they exist */
5572 if (list_empty(&vsi->ch_list))
5575 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5576 struct i40e_vsi *p_vsi;
5578 list_del(&ch->list);
5579 p_vsi = ch->parent_vsi;
5580 if (!p_vsi || !ch->initialized) {
5584 /* Reset queue contexts */
5585 for (i = 0; i < ch->num_queue_pairs; i++) {
5586 struct i40e_ring *tx_ring, *rx_ring;
5589 pf_q = ch->base_queue + i;
5590 tx_ring = vsi->tx_rings[pf_q];
5593 rx_ring = vsi->rx_rings[pf_q];
5597 /* Reset BW configured for this VSI via mqprio */
5598 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5600 dev_info(&vsi->back->pdev->dev,
5601 "Failed to reset tx rate for ch->seid %u\n",
5604 /* delete cloud filters associated with this channel */
5605 hlist_for_each_entry_safe(cfilter, node,
5606 &pf->cloud_filter_list, cloud_node) {
5607 if (cfilter->seid != ch->seid)
5610 hash_del(&cfilter->cloud_node);
5611 if (cfilter->dst_port)
5612 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5616 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5618 last_aq_status = pf->hw.aq.asq_last_status;
5620 dev_info(&pf->pdev->dev,
5621 "Failed to delete cloud filter, err %s aq_err %s\n",
5622 i40e_stat_str(&pf->hw, ret),
5623 i40e_aq_str(&pf->hw, last_aq_status));
5627 /* delete VSI from FW */
5628 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5631 dev_err(&vsi->back->pdev->dev,
5632 "unable to remove channel (%d) for parent VSI(%d)\n",
5633 ch->seid, p_vsi->seid);
5636 INIT_LIST_HEAD(&vsi->ch_list);
5640 * i40e_is_any_channel - is any channel configured for this VSI
5641 * @vsi: ptr to VSI with which the channels are associated
5643 * Returns true if any initialized channel exists for the given VSI, false otherwise
5645 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5647 struct i40e_channel *ch, *ch_tmp;
5649 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5650 if (ch->initialized)
5658 * i40e_get_max_queues_for_channel
5659 * @vsi: ptr to VSI to which channels are associated with
5661 * Helper function which returns max value among the queue counts set on the
5662 * channels/TCs created.
5664 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5666 struct i40e_channel *ch, *ch_tmp;
5669 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5670 if (!ch->initialized)
5672 if (ch->num_queue_pairs > max)
5673 max = ch->num_queue_pairs;
5680 * i40e_validate_num_queues - validate num_queues w.r.t channel
5681 * @pf: ptr to PF device
5682 * @num_queues: number of queues
5683 * @vsi: the parent VSI
5684 * @reconfig_rss: indicates should the RSS be reconfigured or not
5686 * This function validates number of queues in the context of new channel
5687 * which is being established and determines if RSS should be reconfigured
5688 * or not for parent VSI.
5690 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5691 struct i40e_vsi *vsi, bool *reconfig_rss)
5698 *reconfig_rss = false;
5699 if (vsi->current_rss_size) {
5700 if (num_queues > vsi->current_rss_size) {
5701 dev_dbg(&pf->pdev->dev,
5702 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5703 num_queues, vsi->current_rss_size);
5705 } else if ((num_queues < vsi->current_rss_size) &&
5706 (!is_power_of_2(num_queues))) {
5707 dev_dbg(&pf->pdev->dev,
5708 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5709 num_queues, vsi->current_rss_size);
5714 if (!is_power_of_2(num_queues)) {
5715 /* Find the max num_queues configured for any existing channel
5717 * and, if channels exist, enforce 'num_queues' to be at least
5718 * the largest queue count ever configured for a channel.
5720 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5721 if (num_queues < max_ch_queues) {
5722 dev_dbg(&pf->pdev->dev,
5723 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5724 num_queues, max_ch_queues);
5727 *reconfig_rss = true;
5734 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5735 * @vsi: the VSI being setup
5736 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5738 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5740 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5742 struct i40e_pf *pf = vsi->back;
5743 u8 seed[I40E_HKEY_ARRAY_SIZE];
5744 struct i40e_hw *hw = &pf->hw;
5752 if (rss_size > vsi->rss_size)
5755 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5756 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5760 /* Ignoring user configured lut if there is one */
5761 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5763 /* Use user configured hash key if there is one, otherwise
5766 if (vsi->rss_hkey_user)
5767 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5769 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5771 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5773 dev_info(&pf->pdev->dev,
5774 "Cannot set RSS lut, err %s aq_err %s\n",
5775 i40e_stat_str(hw, ret),
5776 i40e_aq_str(hw, hw->aq.asq_last_status));
5782 /* Do the update w.r.t. storing rss_size */
5783 if (!vsi->orig_rss_size)
5784 vsi->orig_rss_size = vsi->rss_size;
5785 vsi->current_rss_size = local_rss_size;
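/* Illustrative sketch, not part of the driver: shrinking RSS to a smaller
 * rss_size only requires re-spreading the lookup table over fewer queues.
 * The i40e_fill_rss_lut() call above does essentially a modulo spread,
 * roughly:
 *
 *	for (i = 0; i < rss_table_size; i++)
 *		lut[i] = i % rss_size;
 *
 * so a 512-entry LUT over rss_size = 4 just repeats the pattern 0,1,2,3.
 */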
5791 * i40e_channel_setup_queue_map - Setup a channel queue map
5792 * @pf: ptr to PF device
5793 * @vsi: the VSI being setup
5794 * @ctxt: VSI context structure
5795 * @ch: ptr to channel structure
5797 * Setup queue map for a specific channel
5799 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5800 struct i40e_vsi_context *ctxt,
5801 struct i40e_channel *ch)
5803 u16 qcount, qmap, sections = 0;
5807 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5808 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5810 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5811 ch->num_queue_pairs = qcount;
5813 /* find the next higher power-of-2 of num queue pairs */
5814 pow = ilog2(qcount);
5815 if (!is_power_of_2(qcount))
5818 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5819 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5821 /* Setup queue TC[0].qmap for given VSI context */
5822 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5824 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5825 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5826 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5827 ctxt->info.valid_sections |= cpu_to_le16(sections);
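/* Worked example, not part of the driver: for a channel asking for 3 queue
 * pairs the map above rounds up to the next power of two, so qcount = 3,
 * pow = 2 and, with offset 0, qmap encodes "start at queue 0, 2^2 queues" in
 * the TC_QUE_OFFSET / TC_QUE_NUMBER fields of tc_mapping[0].
 */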
5831 * i40e_add_channel - add a channel by adding VSI
5832 * @pf: ptr to PF device
5833 * @uplink_seid: underlying HW switching element (VEB) ID
5834 * @ch: ptr to channel structure
5836 * Add a channel (VSI) using add_vsi and queue_map
5838 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5839 struct i40e_channel *ch)
5841 struct i40e_hw *hw = &pf->hw;
5842 struct i40e_vsi_context ctxt;
5843 u8 enabled_tc = 0x1; /* TC0 enabled */
5846 if (ch->type != I40E_VSI_VMDQ2) {
5847 dev_info(&pf->pdev->dev,
5848 "add new vsi failed, ch->type %d\n", ch->type);
5852 memset(&ctxt, 0, sizeof(ctxt));
5853 ctxt.pf_num = hw->pf_id;
5855 ctxt.uplink_seid = uplink_seid;
5856 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5857 if (ch->type == I40E_VSI_VMDQ2)
5858 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5860 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5861 ctxt.info.valid_sections |=
5862 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5863 ctxt.info.switch_id =
5864 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5867 /* Set queue map for a given VSI context */
5868 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5870 /* Now time to create VSI */
5871 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5873 dev_info(&pf->pdev->dev,
5874 "add new vsi failed, err %s aq_err %s\n",
5875 i40e_stat_str(&pf->hw, ret),
5876 i40e_aq_str(&pf->hw,
5877 pf->hw.aq.asq_last_status));
5881 /* Success, update channel */
5882 ch->enabled_tc = enabled_tc;
5883 ch->seid = ctxt.seid;
5884 ch->vsi_number = ctxt.vsi_number;
5885 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5887 /* copy just the sections touched not the entire info
5888 * since not all sections are valid as returned by
5891 ch->info.mapping_flags = ctxt.info.mapping_flags;
5892 memcpy(&ch->info.queue_mapping,
5893 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5894 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5895 sizeof(ctxt.info.tc_mapping));
5900 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5903 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5907 bw_data.tc_valid_bits = ch->enabled_tc;
5908 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5909 bw_data.tc_bw_credits[i] = bw_share[i];
5911 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5914 dev_info(&vsi->back->pdev->dev,
5915 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5916 vsi->back->hw.aq.asq_last_status, ch->seid);
5920 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5921 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5927 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5928 * @pf: ptr to PF device
5929 * @vsi: the VSI being setup
5930 * @ch: ptr to channel structure
5932 * Configure TX rings associated with channel (VSI) since queues are being
5935 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5936 struct i40e_vsi *vsi,
5937 struct i40e_channel *ch)
5941 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5943 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5944 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5945 if (ch->enabled_tc & BIT(i))
5949 /* configure BW for new VSI */
5950 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5952 dev_info(&vsi->back->pdev->dev,
5953 "Failed configuring TC map %d for channel (seid %u)\n",
5954 ch->enabled_tc, ch->seid);
5958 for (i = 0; i < ch->num_queue_pairs; i++) {
5959 struct i40e_ring *tx_ring, *rx_ring;
5962 pf_q = ch->base_queue + i;
5964 /* Get to TX ring ptr of main VSI, for re-setup TX queue
5967 tx_ring = vsi->tx_rings[pf_q];
5970 /* Get the RX ring ptr */
5971 rx_ring = vsi->rx_rings[pf_q];
5979 * i40e_setup_hw_channel - setup new channel
5980 * @pf: ptr to PF device
5981 * @vsi: the VSI being setup
5982 * @ch: ptr to channel structure
5983 * @uplink_seid: underlying HW switching element (VEB) ID
5984 * @type: type of channel to be created (VMDq2/VF)
5986 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5987 * and configures TX rings accordingly
5989 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5990 struct i40e_vsi *vsi,
5991 struct i40e_channel *ch,
5992 u16 uplink_seid, u8 type)
5996 ch->initialized = false;
5997 ch->base_queue = vsi->next_base_queue;
6000 /* Proceed with creation of channel (VMDq2) VSI */
6001 ret = i40e_add_channel(pf, uplink_seid, ch);
6003 dev_info(&pf->pdev->dev,
6004 "failed to add_channel using uplink_seid %u\n",
6009 /* Mark the successful creation of channel */
6010 ch->initialized = true;
6012 /* Reconfigure TX queues using QTX_CTL register */
6013 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
6015 dev_info(&pf->pdev->dev,
6016 "failed to configure TX rings for channel %u\n",
6021 /* update 'next_base_queue' */
6022 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
6023 dev_dbg(&pf->pdev->dev,
6024 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
6025 ch->seid, ch->vsi_number, ch->stat_counter_idx,
6026 ch->num_queue_pairs,
6027 vsi->next_base_queue);
6032 * i40e_setup_channel - setup new channel using uplink element
6033 * @pf: ptr to PF device
6034 * @type: type of channel to be created (VMDq2/VF)
6035 * @uplink_seid: underlying HW switching element (VEB) ID
6036 * @ch: ptr to channel structure
6038 * Setup new channel (VSI) based on specified type (VMDq2/VF)
6039 * and uplink switching element (uplink_seid)
6041 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
6042 struct i40e_channel *ch)
6048 if (vsi->type == I40E_VSI_MAIN) {
6049 vsi_type = I40E_VSI_VMDQ2;
6051 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6056 /* underlying switching element */
6057 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6059 /* create channel (VSI), configure TX rings */
6060 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6062 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
6066 return ch->initialized;
6070 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6071 * @vsi: ptr to VSI which has PF backing
6073 * Sets up the switch mode correctly if it needs to be changed, validating
6074 * it against the allowed modes.
6076 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6079 struct i40e_pf *pf = vsi->back;
6080 struct i40e_hw *hw = &pf->hw;
6083 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6087 if (hw->dev_caps.switch_mode) {
6088 /* if switch mode is set, support mode2 (non-tunneled for
6089 * cloud filter) for now
6091 u32 switch_mode = hw->dev_caps.switch_mode &
6092 I40E_SWITCH_MODE_MASK;
6093 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6094 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6096 dev_err(&pf->pdev->dev,
6097 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6098 hw->dev_caps.switch_mode);
6103 /* Set Bit 7 to be valid */
6104 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6106 /* Set L4type for TCP support */
6107 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6109 /* Set cloud filter mode */
6110 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6112 /* Prep mode field for set_switch_config */
6113 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6114 pf->last_sw_conf_valid_flags,
6116 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6117 dev_err(&pf->pdev->dev,
6118 "couldn't set switch config bits, err %s aq_err %s\n",
6119 i40e_stat_str(hw, ret),
6121 hw->aq.asq_last_status));
6127 * i40e_create_queue_channel - function to create channel
6128 * @vsi: VSI to be configured
6129 * @ch: ptr to channel (it contains channel specific params)
6131 * This function creates channel (VSI) using num_queues specified by user,
6132 * reconfigs RSS if needed.
6134 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6135 struct i40e_channel *ch)
6137 struct i40e_pf *pf = vsi->back;
6144 if (!ch->num_queue_pairs) {
6145 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6146 ch->num_queue_pairs);
6150 /* validate user requested num_queues for channel */
6151 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6154 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6155 ch->num_queue_pairs);
6159 /* By default we are in VEPA mode, if this is the first VF/VMDq
6160 * VSI to be added switch to VEB mode.
6162 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6163 (!i40e_is_any_channel(vsi))) {
6164 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6165 dev_dbg(&pf->pdev->dev,
6166 "Failed to create channel. Override queues (%u) not power of 2\n",
6167 vsi->tc_config.tc_info[0].qcount);
6171 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6172 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6174 if (vsi->type == I40E_VSI_MAIN) {
6175 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6176 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6179 i40e_do_reset_safe(pf,
6180 I40E_PF_RESET_FLAG);
6183 /* now onwards for main VSI, number of queues will be value
6184 * of TC0's queue count
6188 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6189 * it should be more than num_queues
6191 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6192 dev_dbg(&pf->pdev->dev,
6193 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6194 vsi->cnt_q_avail, ch->num_queue_pairs);
6198 /* reconfig_rss only if vsi type is MAIN_VSI */
6199 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6200 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6202 dev_info(&pf->pdev->dev,
6203 "Error: unable to reconfig rss for num_queues (%u)\n",
6204 ch->num_queue_pairs);
6209 if (!i40e_setup_channel(pf, vsi, ch)) {
6210 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6214 dev_info(&pf->pdev->dev,
6215 "Setup channel (id:%u) utilizing num_queues %d\n",
6216 ch->seid, ch->num_queue_pairs);
6218 /* configure VSI for BW limit */
6219 if (ch->max_tx_rate) {
6220 u64 credits = ch->max_tx_rate;
6222 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6225 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6226 dev_dbg(&pf->pdev->dev,
6227 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6233 /* in case of VF, this will be main SRIOV VSI */
6234 ch->parent_vsi = vsi;
6236 /* and update main_vsi's count for queue_available to use */
6237 vsi->cnt_q_avail -= ch->num_queue_pairs;
6243 * i40e_configure_queue_channels - Add queue channel for the given TCs
6244 * @vsi: VSI to be configured
6246 * Configures queue channel mapping to the given TCs
6248 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6250 struct i40e_channel *ch;
6254 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6255 vsi->tc_seid_map[0] = vsi->seid;
6256 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6257 if (vsi->tc_config.enabled_tc & BIT(i)) {
6258 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6264 INIT_LIST_HEAD(&ch->list);
6265 ch->num_queue_pairs =
6266 vsi->tc_config.tc_info[i].qcount;
6268 vsi->tc_config.tc_info[i].qoffset;
6270 /* Bandwidth limit through tc interface is in bytes/s, change to Mbit/s */
6273 max_rate = vsi->mqprio_qopt.max_rate[i];
6274 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6275 ch->max_tx_rate = max_rate;
6277 list_add_tail(&ch->list, &vsi->ch_list);
6279 ret = i40e_create_queue_channel(vsi, ch);
6281 dev_err(&vsi->back->pdev->dev,
6282 "Failed creating queue channel with TC%d: queues %d\n",
6283 i, ch->num_queue_pairs);
6286 vsi->tc_seid_map[i] = ch->seid;
6292 i40e_remove_queue_channels(vsi);
6297 * i40e_veb_config_tc - Configure TCs for given VEB
6299 * @enabled_tc: TC bitmap
6301 * Configures given TC bitmap for VEB (switching) element
6303 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6305 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6306 struct i40e_pf *pf = veb->pf;
6310 /* No TCs or already enabled TCs just return */
6311 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6314 bw_data.tc_valid_bits = enabled_tc;
6315 /* bw_data.absolute_credits is not set (relative) */
6317 /* Enable ETS TCs with equal BW Share for now */
6318 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6319 if (enabled_tc & BIT(i))
6320 bw_data.tc_bw_share_credits[i] = 1;
6323 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6326 dev_info(&pf->pdev->dev,
6327 "VEB bw config failed, err %s aq_err %s\n",
6328 i40e_stat_str(&pf->hw, ret),
6329 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6333 /* Update the BW information */
6334 ret = i40e_veb_get_bw_info(veb);
6336 dev_info(&pf->pdev->dev,
6337 "Failed getting veb bw config, err %s aq_err %s\n",
6338 i40e_stat_str(&pf->hw, ret),
6339 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6346 #ifdef CONFIG_I40E_DCB
6348 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6351 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6352 * the caller would have quiesced all the VSIs before calling this function
6355 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6361 /* Enable the TCs available on PF to all VEBs */
6362 tc_map = i40e_pf_get_tc_map(pf);
6363 for (v = 0; v < I40E_MAX_VEB; v++) {
6366 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6368 dev_info(&pf->pdev->dev,
6369 "Failed configuring TC for VEB seid=%d\n",
6371 /* Will try to configure as many components */
6375 /* Update each VSI */
6376 for (v = 0; v < pf->num_alloc_vsi; v++) {
6380 /* - Enable all TCs for the LAN VSI
6381 * - For all others keep them at TC0 for now
6383 if (v == pf->lan_vsi)
6384 tc_map = i40e_pf_get_tc_map(pf);
6386 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6388 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6390 dev_info(&pf->pdev->dev,
6391 "Failed configuring TC for VSI seid=%d\n",
6393 /* Will try to configure as many of the remaining components as possible */
6395 /* Re-configure VSI vectors based on updated TC map */
6396 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6397 if (pf->vsi[v]->netdev)
6398 i40e_dcbnl_set_all(pf->vsi[v]);
6404 * i40e_resume_port_tx - Resume port Tx
6407 * Resume a port's Tx and issue a PF reset in case of failure to resume it.
6410 static int i40e_resume_port_tx(struct i40e_pf *pf)
6412 struct i40e_hw *hw = &pf->hw;
6415 ret = i40e_aq_resume_port_tx(hw, NULL);
6417 dev_info(&pf->pdev->dev,
6418 "Resume Port Tx failed, err %s aq_err %s\n",
6419 i40e_stat_str(&pf->hw, ret),
6420 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6421 /* Schedule PF reset to recover */
6422 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6423 i40e_service_event_schedule(pf);
6430 * i40e_init_pf_dcb - Initialize DCB configuration
6431 * @pf: PF being configured
6433 * Query the current DCB configuration and cache it
6434 * in the hardware structure
6436 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6438 struct i40e_hw *hw = &pf->hw;
6441 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
6442 * Also do not enable DCBx if FW LLDP agent is disabled
6444 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6445 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
6448 /* Get the initial DCB configuration */
6449 err = i40e_init_dcb(hw);
6451 /* Device/Function is not DCBX capable */
6452 if ((!hw->func_caps.dcb) ||
6453 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6454 dev_info(&pf->pdev->dev,
6455 "DCBX offload is not supported or is disabled for this PF.\n");
6457 /* When status is not DISABLED then DCBX is running in FW */
6458 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6459 DCB_CAP_DCBX_VER_IEEE;
6461 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6462 /* Enable DCB tagging only when more than one TC
6463 * or explicitly disable if only one TC
6465 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6466 pf->flags |= I40E_FLAG_DCB_ENABLED;
6468 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6469 dev_dbg(&pf->pdev->dev,
6470 "DCBX offload is supported for this PF.\n");
6472 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6473 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6474 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6476 dev_info(&pf->pdev->dev,
6477 "Query for DCB configuration failed, err %s aq_err %s\n",
6478 i40e_stat_str(&pf->hw, err),
6479 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6485 #endif /* CONFIG_I40E_DCB */
6486 #define SPEED_SIZE 14
6489 * i40e_print_link_message - print link up or down
6490 * @vsi: the VSI for which link needs a message
6491 * @isup: true if link is up, false otherwise
6493 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6495 enum i40e_aq_link_speed new_speed;
6496 struct i40e_pf *pf = vsi->back;
6497 char *speed = "Unknown";
6498 char *fc = "Unknown";
6503 new_speed = pf->hw.phy.link_info.link_speed;
6505 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6507 vsi->current_isup = isup;
6508 vsi->current_speed = new_speed;
6510 netdev_info(vsi->netdev, "NIC Link is Down\n");
6514 /* Warn user if link speed on NPAR enabled partition is not at least 10Gbps */
6517 if (pf->hw.func_caps.npar_enable &&
6518 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6519 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6520 netdev_warn(vsi->netdev,
6521 "The partition detected link speed that is less than 10Gbps\n");
6523 switch (pf->hw.phy.link_info.link_speed) {
6524 case I40E_LINK_SPEED_40GB:
6527 case I40E_LINK_SPEED_20GB:
6530 case I40E_LINK_SPEED_25GB:
6533 case I40E_LINK_SPEED_10GB:
6536 case I40E_LINK_SPEED_1GB:
6539 case I40E_LINK_SPEED_100MB:
6546 switch (pf->hw.fc.current_mode) {
6550 case I40E_FC_TX_PAUSE:
6553 case I40E_FC_RX_PAUSE:
6561 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6562 req_fec = ", Requested FEC: None";
6563 fec = ", FEC: None";
6564 an = ", Autoneg: False";
6566 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6567 an = ", Autoneg: True";
6569 if (pf->hw.phy.link_info.fec_info &
6570 I40E_AQ_CONFIG_FEC_KR_ENA)
6571 fec = ", FEC: CL74 FC-FEC/BASE-R";
6572 else if (pf->hw.phy.link_info.fec_info &
6573 I40E_AQ_CONFIG_FEC_RS_ENA)
6574 fec = ", FEC: CL108 RS-FEC";
6576 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6577 * both RS and FC are requested
6579 if (vsi->back->hw.phy.link_info.req_fec_info &
6580 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6581 if (vsi->back->hw.phy.link_info.req_fec_info &
6582 I40E_AQ_REQUEST_FEC_RS)
6583 req_fec = ", Requested FEC: CL108 RS-FEC";
6585 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6589 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6590 speed, req_fec, fec, an, fc);
6594 * i40e_up_complete - Finish the last steps of bringing up a connection
6595 * @vsi: the VSI being configured
6597 static int i40e_up_complete(struct i40e_vsi *vsi)
6599 struct i40e_pf *pf = vsi->back;
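/* Program interrupt routing for whichever interrupt mode the PF is using: per-vector MSI-X mapping when available, otherwise the single MSI/legacy vector */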
6602 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6603 i40e_vsi_configure_msix(vsi);
6605 i40e_configure_msi_and_legacy(vsi);
6608 err = i40e_vsi_start_rings(vsi);
6612 clear_bit(__I40E_VSI_DOWN, vsi->state);
6613 i40e_napi_enable_all(vsi);
6614 i40e_vsi_enable_irq(vsi);
6616 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6618 i40e_print_link_message(vsi, true);
6619 netif_tx_start_all_queues(vsi->netdev);
6620 netif_carrier_on(vsi->netdev);
6623 /* replay FDIR SB filters */
6624 if (vsi->type == I40E_VSI_FDIR) {
6625 /* reset fd counters */
6628 i40e_fdir_filter_restore(vsi);
6631 /* On the next run of the service_task, notify any clients of the newly opened netdev */
6634 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6635 i40e_service_event_schedule(pf);
6641 * i40e_vsi_reinit_locked - Reset the VSI
6642 * @vsi: the VSI being configured
6644 * Rebuild the ring structs after some configuration
6645 * has changed, e.g. MTU size.
6647 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6649 struct i40e_pf *pf = vsi->back;
6651 WARN_ON(in_interrupt());
6652 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6653 usleep_range(1000, 2000);
6657 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6661 * i40e_up - Bring the connection back up after being down
6662 * @vsi: the VSI being configured
6664 int i40e_up(struct i40e_vsi *vsi)
6668 err = i40e_vsi_configure(vsi);
6670 err = i40e_up_complete(vsi);
6676 * i40e_force_link_state - Force the link status
6677 * @pf: board private structure
6678 * @is_up: whether the link state should be forced up or down
6680 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6682 struct i40e_aq_get_phy_abilities_resp abilities;
6683 struct i40e_aq_set_phy_config config = {0};
6684 struct i40e_hw *hw = &pf->hw;
6689 /* Card might've been put in an unstable state by other drivers
6690 * and applications, which can cause incorrect speed values to be
6691 * set on startup. In order to clear speed registers, we call
6692 * get_phy_capabilities twice, once to get initial state of
6693 * available speeds, and once to get current PHY config.
6695 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6698 dev_err(&pf->pdev->dev,
6699 "failed to get phy cap., ret = %s last_status = %s\n",
6700 i40e_stat_str(hw, err),
6701 i40e_aq_str(hw, hw->aq.asq_last_status));
6704 speed = abilities.link_speed;
6706 /* Get the current phy config */
6707 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6710 dev_err(&pf->pdev->dev,
6711 "failed to get phy cap., ret = %s last_status = %s\n",
6712 i40e_stat_str(hw, err),
6713 i40e_aq_str(hw, hw->aq.asq_last_status));
6717 /* If link needs to go up, but was not forced to go down,
6718 * and its speed values are OK, no need for a flap
6720 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6721 return I40E_SUCCESS;
6723 /* To force link we need to set bits for all supported PHY types,
6724 * but there are now more than 32, so we need to split the bitmap
6725 * across two fields.
6727 mask = I40E_PHY_TYPES_BITMASK;
6728 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6729 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
6730 /* Copy the old settings, except for phy_type */
6731 config.abilities = abilities.abilities;
6732 if (abilities.link_speed != 0)
6733 config.link_speed = abilities.link_speed;
6735 config.link_speed = speed;
6736 config.eee_capability = abilities.eee_capability;
6737 config.eeer = abilities.eeer_val;
6738 config.low_power_ctrl = abilities.d3_lpan;
6739 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6740 I40E_AQ_PHY_FEC_CONFIG_MASK;
6741 err = i40e_aq_set_phy_config(hw, &config, NULL);
6744 dev_err(&pf->pdev->dev,
6745 "set phy config ret = %s last_status = %s\n",
6746 i40e_stat_str(&pf->hw, err),
6747 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6751 /* Update the link info */
6752 err = i40e_update_link_info(hw);
6754 /* Wait a little bit (on 40G cards it sometimes takes a really
6755 * long time for link to come back from the atomic reset)
6759 i40e_update_link_info(hw);
6762 i40e_aq_set_link_restart_an(hw, true, NULL);
6764 return I40E_SUCCESS;
6768 * i40e_down - Shutdown the connection processing
6769 * @vsi: the VSI being stopped
6771 void i40e_down(struct i40e_vsi *vsi)
6775 /* It is assumed that the caller of this function
6776 * sets the vsi->state __I40E_VSI_DOWN bit.
6779 netif_carrier_off(vsi->netdev);
6780 netif_tx_disable(vsi->netdev);
6782 i40e_vsi_disable_irq(vsi);
6783 i40e_vsi_stop_rings(vsi);
6784 if (vsi->type == I40E_VSI_MAIN &&
6785 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6786 i40e_force_link_state(vsi->back, false);
6787 i40e_napi_disable_all(vsi);
6789 for (i = 0; i < vsi->num_queue_pairs; i++) {
6790 i40e_clean_tx_ring(vsi->tx_rings[i]);
6791 if (i40e_enabled_xdp_vsi(vsi))
6792 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6793 i40e_clean_rx_ring(vsi->rx_rings[i]);
6799 * i40e_validate_mqprio_qopt - validate queue mapping info
6800 * @vsi: the VSI being configured
6801 * @mqprio_qopt: queue parameters
6803 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6804 struct tc_mqprio_qopt_offload *mqprio_qopt)
6806 u64 sum_max_rate = 0;
6810 if (mqprio_qopt->qopt.offset[0] != 0 ||
6811 mqprio_qopt->qopt.num_tc < 1 ||
6812 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
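/* Walk every requested TC: each must have a non-zero queue count, min_rate is not supported, and queue offsets must be contiguous */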
6814 for (i = 0; ; i++) {
6815 if (!mqprio_qopt->qopt.count[i])
6817 if (mqprio_qopt->min_rate[i]) {
6818 dev_err(&vsi->back->pdev->dev,
6819 "Invalid min tx rate (greater than 0) specified\n");
6822 max_rate = mqprio_qopt->max_rate[i];
6823 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6824 sum_max_rate += max_rate;
6826 if (i >= mqprio_qopt->qopt.num_tc - 1)
6828 if (mqprio_qopt->qopt.offset[i + 1] !=
6829 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6832 if (vsi->num_queue_pairs <
6833 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6834 dev_err(&vsi->back->pdev->dev,
6835 "Failed to create traffic channel, insufficient number of queues.\n");
6838 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6839 dev_err(&vsi->back->pdev->dev,
6840 "Invalid max tx rate specified\n");
6847 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6848 * @vsi: the VSI being configured
6850 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6855 /* Only TC0 is enabled */
6856 vsi->tc_config.numtc = 1;
6857 vsi->tc_config.enabled_tc = 1;
6858 qcount = min_t(int, vsi->alloc_queue_pairs,
6859 i40e_pf_get_max_q_per_tc(vsi->back));
6860 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6861 /* For the TC that is not enabled set the offset to the default
6862 * queue and allocate one queue for the given TC.
6864 vsi->tc_config.tc_info[i].qoffset = 0;
6866 vsi->tc_config.tc_info[i].qcount = qcount;
6868 vsi->tc_config.tc_info[i].qcount = 1;
6869 vsi->tc_config.tc_info[i].netdev_tc = 0;
6874 * i40e_setup_tc - configure multiple traffic classes
6875 * @netdev: net device to configure
6876 * @type_data: tc offload data
6878 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6880 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6881 struct i40e_netdev_priv *np = netdev_priv(netdev);
6882 struct i40e_vsi *vsi = np->vsi;
6883 struct i40e_pf *pf = vsi->back;
6884 u8 enabled_tc = 0, num_tc, hw;
6885 bool need_reset = false;
6886 int old_queue_pairs;
6891 old_queue_pairs = vsi->num_queue_pairs;
6892 num_tc = mqprio_qopt->qopt.num_tc;
6893 hw = mqprio_qopt->qopt.hw;
6894 mode = mqprio_qopt->mode;
6896 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6897 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6901 /* Check if MFP enabled */
6902 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6904 "Configuring TC not supported in MFP mode\n");
6908 case TC_MQPRIO_MODE_DCB:
6909 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6911 /* Check if DCB enabled to continue */
6912 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6914 "DCB is not enabled for adapter\n");
6918 /* Check whether tc count is within enabled limit */
6919 if (num_tc > i40e_pf_get_num_tc(pf)) {
6921 "TC count greater than enabled on link for adapter\n");
6925 case TC_MQPRIO_MODE_CHANNEL:
6926 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6928 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6931 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6933 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6936 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6937 sizeof(*mqprio_qopt));
6938 pf->flags |= I40E_FLAG_TC_MQPRIO;
6939 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6946 /* Generate TC map for number of tc requested */
6947 for (i = 0; i < num_tc; i++)
6948 enabled_tc |= BIT(i);
6950 /* Requesting same TC configuration as already enabled */
6951 if (enabled_tc == vsi->tc_config.enabled_tc &&
6952 mode != TC_MQPRIO_MODE_CHANNEL)
6955 /* Quiesce VSI queues */
6956 i40e_quiesce_vsi(vsi);
6958 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6959 i40e_remove_queue_channels(vsi);
6961 /* Configure VSI for enabled TCs */
6962 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6964 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6970 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6971 if (vsi->mqprio_qopt.max_rate[0]) {
6972 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
6973 vsi->mqprio_qopt.max_rate[0]);
6975 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6977 u64 credits = max_tx_rate;
6979 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6980 dev_dbg(&vsi->back->pdev->dev,
6981 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6990 ret = i40e_configure_queue_channels(vsi);
6992 vsi->num_queue_pairs = old_queue_pairs;
6994 "Failed configuring queue channels\n");
7001 /* Reset the configuration data to defaults, only TC0 is enabled */
7003 i40e_vsi_set_default_tc_config(vsi);
7008 i40e_unquiesce_vsi(vsi);
7013 * i40e_set_cld_element - sets cloud filter element data
7014 * @filter: cloud filter rule
7015 * @cld: ptr to cloud filter element data
7017 * This is a helper function to copy data into a cloud filter element
7020 i40e_set_cld_element(struct i40e_cloud_filter *filter,
7021 struct i40e_aqc_cloud_filters_element_data *cld)
7026 memset(cld, 0, sizeof(*cld));
7027 ether_addr_copy(cld->outer_mac, filter->dst_mac);
7028 ether_addr_copy(cld->inner_mac, filter->src_mac);
7030 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
7033 if (filter->n_proto == ETH_P_IPV6) {
7034 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
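/* Copy the IPv6 destination address word by word, reversing the word order and converting each word from network byte order to the little-endian layout used by the admin queue element */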
7035 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
7037 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
7038 ipa = cpu_to_le32(ipa);
7039 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7042 ipa = be32_to_cpu(filter->dst_ipv4);
7043 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7046 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7048 /* tenant_id is not supported by FW now, once the support is enabled
7049 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7051 if (filter->tenant_id)
7056 * i40e_add_del_cloud_filter - Add/del cloud filter
7057 * @vsi: pointer to VSI
7058 * @filter: cloud filter rule
7059 * @add: if true, add, if false, delete
7061 * Add or delete a cloud filter for a specific flow spec.
7062 * Returns 0 if the filter was successfully added.
7064 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7065 struct i40e_cloud_filter *filter, bool add)
7067 struct i40e_aqc_cloud_filters_element_data cld_filter;
7068 struct i40e_pf *pf = vsi->back;
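/* Table mapping the driver's cloud filter flag combinations to the corresponding admin queue filter type values */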
7070 static const u16 flag_table[128] = {
7071 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7072 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7073 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7074 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7075 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7076 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7077 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7078 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7079 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7080 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7081 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7082 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7083 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7084 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7087 if (filter->flags >= ARRAY_SIZE(flag_table))
7088 return I40E_ERR_CONFIG;
7090 memset(&cld_filter, 0, sizeof(cld_filter));
7092 /* copy element needed to add cloud filter from filter */
7093 i40e_set_cld_element(filter, &cld_filter);
7095 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7096 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7097 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7099 if (filter->n_proto == ETH_P_IPV6)
7100 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7101 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7103 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7104 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7107 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7110 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7113 dev_dbg(&pf->pdev->dev,
7114 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7115 add ? "add" : "delete", filter->dst_port, ret,
7116 pf->hw.aq.asq_last_status);
7118 dev_info(&pf->pdev->dev,
7119 "%s cloud filter for VSI: %d\n",
7120 add ? "Added" : "Deleted", filter->seid);
7125 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7126 * @vsi: pointer to VSI
7127 * @filter: cloud filter rule
7128 * @add: if true, add, if false, delete
7130 * Add or delete a cloud filter for a specific flow spec using big buffer.
7131 * Returns 0 if the filter was successfully added.
7133 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7134 struct i40e_cloud_filter *filter,
7137 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7138 struct i40e_pf *pf = vsi->back;
7141 /* Specifying both src and dst MAC addresses (valid or multicast) is not supported */
7142 if ((is_valid_ether_addr(filter->dst_mac) &&
7143 is_valid_ether_addr(filter->src_mac)) ||
7144 (is_multicast_ether_addr(filter->dst_mac) &&
7145 is_multicast_ether_addr(filter->src_mac)))
7148 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7149 * ports are not supported via big buffer now.
7151 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7154 /* adding filter using src_port/src_ip is not supported at this stage */
7155 if (filter->src_port ||
7156 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
7157 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7160 memset(&cld_filter, 0, sizeof(cld_filter));
7162 /* copy element needed to add cloud filter from filter */
7163 i40e_set_cld_element(filter, &cld_filter.element);
7165 if (is_valid_ether_addr(filter->dst_mac) ||
7166 is_valid_ether_addr(filter->src_mac) ||
7167 is_multicast_ether_addr(filter->dst_mac) ||
7168 is_multicast_ether_addr(filter->src_mac)) {
7169 /* MAC + IP : unsupported mode */
7170 if (filter->dst_ipv4)
7173 /* since we validated that L4 port must be valid before
7174 * we get here, start with respective "flags" value
7175 * and update if vlan is present or not
7177 cld_filter.element.flags =
7178 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7180 if (filter->vlan_id) {
7181 cld_filter.element.flags =
7182 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7185 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
7186 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7187 cld_filter.element.flags =
7188 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7189 if (filter->n_proto == ETH_P_IPV6)
7190 cld_filter.element.flags |=
7191 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7193 cld_filter.element.flags |=
7194 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7196 dev_err(&pf->pdev->dev,
7197 "either mac or ip has to be valid for cloud filter\n");
7201 /* Now copy L4 port in Byte 6..7 in general fields */
7202 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7203 be16_to_cpu(filter->dst_port);
7206 /* Validate current device switch mode, change if necessary */
7207 ret = i40e_validate_and_set_switch_mode(vsi);
7209 dev_err(&pf->pdev->dev,
7210 "failed to set switch mode, ret %d\n",
7215 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7218 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7223 dev_dbg(&pf->pdev->dev,
7224 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7225 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7227 dev_info(&pf->pdev->dev,
7228 "%s cloud filter for VSI: %d, L4 port: %d\n",
7229 add ? "add" : "delete", filter->seid,
7230 ntohs(filter->dst_port));
7235 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7236 * @vsi: Pointer to VSI
7237 * @f: Pointer to struct tc_cls_flower_offload
7238 * @filter: Pointer to cloud filter structure
7241 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7242 struct tc_cls_flower_offload *f,
7243 struct i40e_cloud_filter *filter)
7245 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7246 struct i40e_pf *pf = vsi->back;
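/* Reject the filter if it matches on any dissector key the hardware cannot offload */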
7249 if (f->dissector->used_keys &
7250 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7251 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7252 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7253 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7254 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7255 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7256 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7257 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7258 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7259 f->dissector->used_keys);
7263 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7264 struct flow_dissector_key_keyid *key =
7265 skb_flow_dissector_target(f->dissector,
7266 FLOW_DISSECTOR_KEY_ENC_KEYID,
7269 struct flow_dissector_key_keyid *mask =
7270 skb_flow_dissector_target(f->dissector,
7271 FLOW_DISSECTOR_KEY_ENC_KEYID,
7274 if (mask->keyid != 0)
7275 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7277 filter->tenant_id = be32_to_cpu(key->keyid);
7280 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7281 struct flow_dissector_key_basic *key =
7282 skb_flow_dissector_target(f->dissector,
7283 FLOW_DISSECTOR_KEY_BASIC,
7286 struct flow_dissector_key_basic *mask =
7287 skb_flow_dissector_target(f->dissector,
7288 FLOW_DISSECTOR_KEY_BASIC,
7291 n_proto_key = ntohs(key->n_proto);
7292 n_proto_mask = ntohs(mask->n_proto);
7294 if (n_proto_key == ETH_P_ALL) {
7298 filter->n_proto = n_proto_key & n_proto_mask;
7299 filter->ip_proto = key->ip_proto;
7302 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7303 struct flow_dissector_key_eth_addrs *key =
7304 skb_flow_dissector_target(f->dissector,
7305 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7308 struct flow_dissector_key_eth_addrs *mask =
7309 skb_flow_dissector_target(f->dissector,
7310 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7313 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
7314 if (!is_zero_ether_addr(mask->dst)) {
7315 if (is_broadcast_ether_addr(mask->dst)) {
7316 field_flags |= I40E_CLOUD_FIELD_OMAC;
7318 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7320 return I40E_ERR_CONFIG;
7324 if (!is_zero_ether_addr(mask->src)) {
7325 if (is_broadcast_ether_addr(mask->src)) {
7326 field_flags |= I40E_CLOUD_FIELD_IMAC;
7328 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7330 return I40E_ERR_CONFIG;
7333 ether_addr_copy(filter->dst_mac, key->dst);
7334 ether_addr_copy(filter->src_mac, key->src);
7337 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7338 struct flow_dissector_key_vlan *key =
7339 skb_flow_dissector_target(f->dissector,
7340 FLOW_DISSECTOR_KEY_VLAN,
7342 struct flow_dissector_key_vlan *mask =
7343 skb_flow_dissector_target(f->dissector,
7344 FLOW_DISSECTOR_KEY_VLAN,
7347 if (mask->vlan_id) {
7348 if (mask->vlan_id == VLAN_VID_MASK) {
7349 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7352 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7354 return I40E_ERR_CONFIG;
7358 filter->vlan_id = cpu_to_be16(key->vlan_id);
7361 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7362 struct flow_dissector_key_control *key =
7363 skb_flow_dissector_target(f->dissector,
7364 FLOW_DISSECTOR_KEY_CONTROL,
7367 addr_type = key->addr_type;
7370 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7371 struct flow_dissector_key_ipv4_addrs *key =
7372 skb_flow_dissector_target(f->dissector,
7373 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7375 struct flow_dissector_key_ipv4_addrs *mask =
7376 skb_flow_dissector_target(f->dissector,
7377 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7381 if (mask->dst == cpu_to_be32(0xffffffff)) {
7382 field_flags |= I40E_CLOUD_FIELD_IIP;
7384 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7386 return I40E_ERR_CONFIG;
7391 if (mask->src == cpu_to_be32(0xffffffff)) {
7392 field_flags |= I40E_CLOUD_FIELD_IIP;
7394 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7396 return I40E_ERR_CONFIG;
7400 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7401 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7402 return I40E_ERR_CONFIG;
7404 filter->dst_ipv4 = key->dst;
7405 filter->src_ipv4 = key->src;
7408 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7409 struct flow_dissector_key_ipv6_addrs *key =
7410 skb_flow_dissector_target(f->dissector,
7411 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7413 struct flow_dissector_key_ipv6_addrs *mask =
7414 skb_flow_dissector_target(f->dissector,
7415 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7418 /* src and dest IPV6 address should not be LOOPBACK
7419 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7421 if (ipv6_addr_loopback(&key->dst) ||
7422 ipv6_addr_loopback(&key->src)) {
7423 dev_err(&pf->pdev->dev,
7424 "Bad ipv6, addr is LOOPBACK\n");
7425 return I40E_ERR_CONFIG;
7427 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
7428 field_flags |= I40E_CLOUD_FIELD_IIP;
7430 memcpy(&filter->src_ipv6, &key->src.s6_addr32,
7431 sizeof(filter->src_ipv6));
7432 memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
7433 sizeof(filter->dst_ipv6));
7436 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7437 struct flow_dissector_key_ports *key =
7438 skb_flow_dissector_target(f->dissector,
7439 FLOW_DISSECTOR_KEY_PORTS,
7441 struct flow_dissector_key_ports *mask =
7442 skb_flow_dissector_target(f->dissector,
7443 FLOW_DISSECTOR_KEY_PORTS,
7447 if (mask->src == cpu_to_be16(0xffff)) {
7448 field_flags |= I40E_CLOUD_FIELD_IIP;
7450 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7451 be16_to_cpu(mask->src));
7452 return I40E_ERR_CONFIG;
7457 if (mask->dst == cpu_to_be16(0xffff)) {
7458 field_flags |= I40E_CLOUD_FIELD_IIP;
7460 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7461 be16_to_cpu(mask->dst));
7462 return I40E_ERR_CONFIG;
7466 filter->dst_port = key->dst;
7467 filter->src_port = key->src;
7469 switch (filter->ip_proto) {
7474 dev_err(&pf->pdev->dev,
7475 "Only UDP and TCP transport are supported\n");
7479 filter->flags = field_flags;
7484 * i40e_handle_tclass - Forward to a traffic class on the device
7485 * @vsi: Pointer to VSI
7486 * @tc: traffic class index on the device
7487 * @filter: Pointer to cloud filter structure
7490 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7491 struct i40e_cloud_filter *filter)
7493 struct i40e_channel *ch, *ch_tmp;
7495 /* direct to a traffic class on the same device */
7497 filter->seid = vsi->seid;
7499 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7500 if (!filter->dst_port) {
7501 dev_err(&vsi->back->pdev->dev,
7502 "Specify destination port to direct to traffic class that is not default\n");
7505 if (list_empty(&vsi->ch_list))
7507 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7509 if (ch->seid == vsi->tc_seid_map[tc])
7510 filter->seid = ch->seid;
7514 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7519 * i40e_configure_clsflower - Configure tc flower filters
7520 * @vsi: Pointer to VSI
7521 * @cls_flower: Pointer to struct tc_cls_flower_offload
7524 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7525 struct tc_cls_flower_offload *cls_flower)
7527 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7528 struct i40e_cloud_filter *filter = NULL;
7529 struct i40e_pf *pf = vsi->back;
7533 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7538 dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
7542 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7543 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7546 if (pf->fdir_pf_active_filters ||
7547 (!hlist_empty(&pf->fdir_filter_list))) {
7548 dev_err(&vsi->back->pdev->dev,
7549 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7553 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7554 dev_err(&vsi->back->pdev->dev,
7555 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7556 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7557 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7560 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7564 filter->cookie = cls_flower->cookie;
7566 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
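/* Resolve the destination SEID (main VSI or channel VSI) for the requested traffic class */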
7570 err = i40e_handle_tclass(vsi, tc, filter);
7574 /* Add cloud filter */
7575 if (filter->dst_port)
7576 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7578 err = i40e_add_del_cloud_filter(vsi, filter, true);
7581 dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
7586 /* add filter to the ordered list */
7587 INIT_HLIST_NODE(&filter->cloud_node);
7589 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7591 pf->num_cloud_filters++;
7600 * i40e_find_cloud_filter - Find the cloud filter in the list
7601 * @vsi: Pointer to VSI
7602 * @cookie: filter specific cookie
7605 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7606 unsigned long *cookie)
7608 struct i40e_cloud_filter *filter = NULL;
7609 struct hlist_node *node2;
7611 hlist_for_each_entry_safe(filter, node2,
7612 &vsi->back->cloud_filter_list, cloud_node)
7613 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7619 * i40e_delete_clsflower - Remove tc flower filters
7620 * @vsi: Pointer to VSI
7621 * @cls_flower: Pointer to struct tc_cls_flower_offload
7624 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7625 struct tc_cls_flower_offload *cls_flower)
7627 struct i40e_cloud_filter *filter = NULL;
7628 struct i40e_pf *pf = vsi->back;
7631 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7636 hash_del(&filter->cloud_node);
7638 if (filter->dst_port)
7639 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7641 err = i40e_add_del_cloud_filter(vsi, filter, false);
7645 dev_err(&pf->pdev->dev,
7646 "Failed to delete cloud filter, err %s\n",
7647 i40e_stat_str(&pf->hw, err));
7648 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7651 pf->num_cloud_filters--;
7652 if (!pf->num_cloud_filters)
7653 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7654 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7655 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7656 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7657 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7663 * i40e_setup_tc_cls_flower - flower classifier offloads
7664 * @np: net device private structure
7665 * @cls_flower: Pointer to struct tc_cls_flower_offload
7667 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7668 struct tc_cls_flower_offload *cls_flower)
7670 struct i40e_vsi *vsi = np->vsi;
7672 switch (cls_flower->command) {
7673 case TC_CLSFLOWER_REPLACE:
7674 return i40e_configure_clsflower(vsi, cls_flower);
7675 case TC_CLSFLOWER_DESTROY:
7676 return i40e_delete_clsflower(vsi, cls_flower);
7677 case TC_CLSFLOWER_STATS:
7684 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7687 struct i40e_netdev_priv *np = cb_priv;
7689 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
7693 case TC_SETUP_CLSFLOWER:
7694 return i40e_setup_tc_cls_flower(np, type_data);
7701 static int i40e_setup_tc_block(struct net_device *dev,
7702 struct tc_block_offload *f)
7704 struct i40e_netdev_priv *np = netdev_priv(dev);
7706 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7709 switch (f->command) {
7711 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7713 case TC_BLOCK_UNBIND:
7714 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7721 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7725 case TC_SETUP_QDISC_MQPRIO:
7726 return i40e_setup_tc(netdev, type_data);
7727 case TC_SETUP_BLOCK:
7728 return i40e_setup_tc_block(netdev, type_data);
7735 * i40e_open - Called when a network interface is made active
7736 * @netdev: network interface device structure
7738 * The open entry point is called when a network interface is made
7739 * active by the system (IFF_UP). At this point all resources needed
7740 * for transmit and receive operations are allocated, the interrupt
7741 * handler is registered with the OS, the netdev watchdog subtask is
7742 * enabled, and the stack is notified that the interface is ready.
7744 * Returns 0 on success, negative value on failure
7746 int i40e_open(struct net_device *netdev)
7748 struct i40e_netdev_priv *np = netdev_priv(netdev);
7749 struct i40e_vsi *vsi = np->vsi;
7750 struct i40e_pf *pf = vsi->back;
7753 /* disallow open during test or if eeprom is broken */
7754 if (test_bit(__I40E_TESTING, pf->state) ||
7755 test_bit(__I40E_BAD_EEPROM, pf->state))
7758 netif_carrier_off(netdev);
7760 if (i40e_force_link_state(pf, true))
7763 err = i40e_vsi_open(vsi);
7767 /* configure global TSO hardware offload settings */
7768 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7769 TCP_FLAG_FIN) >> 16);
7770 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7772 TCP_FLAG_CWR) >> 16);
7773 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7775 udp_tunnel_get_rx_info(netdev);
7781 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
7782 * @vsi: vsi structure
7784 * This updates netdev's number of tx/rx queues
7786 * Returns status of setting tx/rx queues
7788 static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
7792 ret = netif_set_real_num_rx_queues(vsi->netdev,
7793 vsi->num_queue_pairs);
7797 return netif_set_real_num_tx_queues(vsi->netdev,
7798 vsi->num_queue_pairs);
7803 * @vsi: the VSI to open
7805 * Finish initialization of the VSI.
7807 * Returns 0 on success, negative value on failure
7809 * Note: expects to be called while under rtnl_lock()
7811 int i40e_vsi_open(struct i40e_vsi *vsi)
7813 struct i40e_pf *pf = vsi->back;
7814 char int_name[I40E_INT_NAME_STR_LEN];
7817 /* allocate descriptors */
7818 err = i40e_vsi_setup_tx_resources(vsi);
7821 err = i40e_vsi_setup_rx_resources(vsi);
7825 err = i40e_vsi_configure(vsi);
7830 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7831 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7832 err = i40e_vsi_request_irq(vsi, int_name);
7836 /* Notify the stack of the actual queue counts. */
7837 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
7839 goto err_set_queues;
7841 } else if (vsi->type == I40E_VSI_FDIR) {
7842 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7843 dev_driver_string(&pf->pdev->dev),
7844 dev_name(&pf->pdev->dev));
7845 err = i40e_vsi_request_irq(vsi, int_name);
7854 err = i40e_up_complete(vsi);
7856 goto err_up_complete;
7863 i40e_vsi_free_irq(vsi);
7865 i40e_vsi_free_rx_resources(vsi);
7867 i40e_vsi_free_tx_resources(vsi);
7868 if (vsi == pf->vsi[pf->lan_vsi])
7869 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7875 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
7876 * @pf: Pointer to PF
7878 * This function destroys the hlist where all the Flow Director
7879 * filters were saved.
7881 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7883 struct i40e_fdir_filter *filter;
7884 struct i40e_flex_pit *pit_entry, *tmp;
7885 struct hlist_node *node2;
7887 hlist_for_each_entry_safe(filter, node2,
7888 &pf->fdir_filter_list, fdir_node) {
7889 hlist_del(&filter->fdir_node);
7893 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7894 list_del(&pit_entry->list);
7897 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7899 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7900 list_del(&pit_entry->list);
7903 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
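/* Reset the Flow Director filter accounting now that the filter lists are empty */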
7905 pf->fdir_pf_active_filters = 0;
7906 pf->fd_tcp4_filter_cnt = 0;
7907 pf->fd_udp4_filter_cnt = 0;
7908 pf->fd_sctp4_filter_cnt = 0;
7909 pf->fd_ip4_filter_cnt = 0;
7911 /* Reprogram the default input set for TCP/IPv4 */
7912 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7913 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7914 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7916 /* Reprogram the default input set for UDP/IPv4 */
7917 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7918 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7919 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7921 /* Reprogram the default input set for SCTP/IPv4 */
7922 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7923 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7924 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7926 /* Reprogram the default input set for Other/IPv4 */
7927 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7928 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7930 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
7931 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7935 * i40e_cloud_filter_exit - Cleans up the cloud filters
7936 * @pf: Pointer to PF
7938 * This function destroys the hlist where all the cloud filters were saved.
7941 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7943 struct i40e_cloud_filter *cfilter;
7944 struct hlist_node *node;
7946 hlist_for_each_entry_safe(cfilter, node,
7947 &pf->cloud_filter_list, cloud_node) {
7948 hlist_del(&cfilter->cloud_node);
7951 pf->num_cloud_filters = 0;
7953 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7954 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7955 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7956 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7957 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7962 * i40e_close - Disables a network interface
7963 * @netdev: network interface device structure
7965 * The close entry point is called when an interface is de-activated
7966 * by the OS. The hardware is still under the driver's control, but
7967 * this netdev interface is disabled.
7969 * Returns 0, this is not allowed to fail
7971 int i40e_close(struct net_device *netdev)
7973 struct i40e_netdev_priv *np = netdev_priv(netdev);
7974 struct i40e_vsi *vsi = np->vsi;
7976 i40e_vsi_close(vsi);
7982 * i40e_do_reset - Start a PF or Core Reset sequence
7983 * @pf: board private structure
7984 * @reset_flags: which reset is requested
7985 * @lock_acquired: indicates whether or not the lock has been acquired
7986 * before this function was called.
7988 * The essential difference in resets is that the PF Reset
7989 * doesn't clear the packet buffers, doesn't reset the PE
7990 * firmware, and doesn't bother the other PFs on the chip.
7992 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7996 WARN_ON(in_interrupt());
7999 /* do the biggest reset indicated */
8000 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
8002 /* Request a Global Reset
8004 * This will start the chip's countdown to the actual full
8005 * chip reset event, and a warning interrupt to be sent
8006 * to all PFs, including the requestor. Our handler
8007 * for the warning interrupt will deal with the shutdown
8008 * and recovery of the switch setup.
8010 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
8011 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8012 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
8013 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8015 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
8017 /* Request a Core Reset
8019 * Same as Global Reset, except does *not* include the MAC/PHY
8021 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
8022 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8023 val |= I40E_GLGEN_RTRIG_CORER_MASK;
8024 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
8025 i40e_flush(&pf->hw);
8027 } else if (reset_flags & I40E_PF_RESET_FLAG) {
8029 /* Request a PF Reset
8031 * Resets only the PF-specific registers
8033 * This goes directly to the tear-down and rebuild of
8034 * the switch, since we need to do all the recovery as
8035 * for the Core Reset.
8037 dev_dbg(&pf->pdev->dev, "PFR requested\n");
8038 i40e_handle_reset_warning(pf, lock_acquired);
8040 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
8041 /* Request a PF Reset
8043 * Resets the PF and reinitializes the PF's VSIs.
8045 i40e_prep_for_reset(pf, lock_acquired);
8046 i40e_reset_and_rebuild(pf, true, lock_acquired);
8048 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
8051 /* Find the VSI(s) that requested a re-init */
8052 dev_info(&pf->pdev->dev,
8053 "VSI reinit requested\n");
8054 for (v = 0; v < pf->num_alloc_vsi; v++) {
8055 struct i40e_vsi *vsi = pf->vsi[v];
8058 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
8060 i40e_vsi_reinit_locked(pf->vsi[v]);
8062 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
8065 /* Find the VSI(s) that needs to be brought down */
8066 dev_info(&pf->pdev->dev, "VSI down requested\n");
8067 for (v = 0; v < pf->num_alloc_vsi; v++) {
8068 struct i40e_vsi *vsi = pf->vsi[v];
8071 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
8073 set_bit(__I40E_VSI_DOWN, vsi->state);
8078 dev_info(&pf->pdev->dev,
8079 "bad reset request 0x%08x\n", reset_flags);
8083 #ifdef CONFIG_I40E_DCB
8085 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
8086 * @pf: board private structure
8087 * @old_cfg: current DCB config
8088 * @new_cfg: new DCB config
8090 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8091 struct i40e_dcbx_config *old_cfg,
8092 struct i40e_dcbx_config *new_cfg)
8094 bool need_reconfig = false;
8096 /* Check if ETS configuration has changed */
8097 if (memcmp(&new_cfg->etscfg,
8099 sizeof(new_cfg->etscfg))) {
8100 /* If Priority Table has changed reconfig is needed */
8101 if (memcmp(&new_cfg->etscfg.prioritytable,
8102 &old_cfg->etscfg.prioritytable,
8103 sizeof(new_cfg->etscfg.prioritytable))) {
8104 need_reconfig = true;
8105 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8108 if (memcmp(&new_cfg->etscfg.tcbwtable,
8109 &old_cfg->etscfg.tcbwtable,
8110 sizeof(new_cfg->etscfg.tcbwtable)))
8111 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8113 if (memcmp(&new_cfg->etscfg.tsatable,
8114 &old_cfg->etscfg.tsatable,
8115 sizeof(new_cfg->etscfg.tsatable)))
8116 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8119 /* Check if PFC configuration has changed */
8120 if (memcmp(&new_cfg->pfc,
8122 sizeof(new_cfg->pfc))) {
8123 need_reconfig = true;
8124 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8127 /* Check if APP Table has changed */
8128 if (memcmp(&new_cfg->app,
8130 sizeof(new_cfg->app))) {
8131 need_reconfig = true;
8132 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8135 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8136 return need_reconfig;
8140 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8141 * @pf: board private structure
8142 * @e: event info posted on ARQ
8144 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8145 struct i40e_arq_event_info *e)
8147 struct i40e_aqc_lldp_get_mib *mib =
8148 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8149 struct i40e_hw *hw = &pf->hw;
8150 struct i40e_dcbx_config tmp_dcbx_cfg;
8151 bool need_reconfig = false;
8155 /* Not DCB capable or capability disabled */
8156 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8159 /* Ignore if event is not for Nearest Bridge */
8160 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8161 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8162 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8163 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8166 /* Check MIB Type and return if event for Remote MIB update */
8167 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8168 dev_dbg(&pf->pdev->dev,
8169 "LLDP event mib type %s\n", type ? "remote" : "local");
8170 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8171 /* Update the remote cached instance and return */
8172 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8173 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8174 &hw->remote_dcbx_config);
8178 /* Store the old configuration */
8179 tmp_dcbx_cfg = hw->local_dcbx_config;
8181 /* Reset the old DCBx configuration data */
8182 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8183 /* Get updated DCBX data from firmware */
8184 ret = i40e_get_dcb_config(&pf->hw);
8186 dev_info(&pf->pdev->dev,
8187 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8188 i40e_stat_str(&pf->hw, ret),
8189 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8193 /* No change detected in DCBX configs */
8194 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8195 sizeof(tmp_dcbx_cfg))) {
8196 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8200 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8201 &hw->local_dcbx_config);
8203 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8208 /* Enable DCB tagging only when more than one TC */
8209 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8210 pf->flags |= I40E_FLAG_DCB_ENABLED;
8212 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8214 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8215 /* Reconfiguration needed, so quiesce all VSIs */
8216 i40e_pf_quiesce_all_vsi(pf);
8218 /* Changes in configuration update VEB/VSI */
8219 i40e_dcb_reconfigure(pf);
8221 ret = i40e_resume_port_tx(pf);
8223 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8224 /* In case of error no point in resuming VSIs */
8228 /* Wait for the PF's queues to be disabled */
8229 ret = i40e_pf_wait_queues_disabled(pf);
8231 /* Schedule PF reset to recover */
8232 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8233 i40e_service_event_schedule(pf);
8235 i40e_pf_unquiesce_all_vsi(pf);
8236 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8237 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8243 #endif /* CONFIG_I40E_DCB */
8246 * i40e_do_reset_safe - Protected reset path for userland calls.
8247 * @pf: board private structure
8248 * @reset_flags: which reset is requested
8251 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8254 i40e_do_reset(pf, reset_flags, true);
8259 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8260 * @pf: board private structure
8261 * @e: event info posted on ARQ
8263 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8266 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8267 struct i40e_arq_event_info *e)
8269 struct i40e_aqc_lan_overflow *data =
8270 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8271 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8272 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8273 struct i40e_hw *hw = &pf->hw;
8277 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8280 /* Queue belongs to VF, find the VF and issue VF reset */
8281 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8282 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8283 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8284 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
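/* Convert the absolute VF number from the register into an index into this PF's VF array */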
8285 vf_id -= hw->func_caps.vf_base_id;
8286 vf = &pf->vf[vf_id];
8287 i40e_vc_notify_vf_reset(vf);
8288 /* Allow VF to process pending reset notification */
8290 i40e_reset_vf(vf, false);
8295 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8296 * @pf: board private structure
8298 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8302 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8303 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8308 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8309 * @pf: board private structure
8311 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8315 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
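/* Total is the sum of the guaranteed filters and the best effort filters programmed for this PF */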
8316 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8317 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8318 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8323 * i40e_get_global_fd_count - Get total FD filters programmed on device
8324 * @pf: board private structure
8326 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8330 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8331 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8332 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8333 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8338 * i40e_reenable_fdir_sb - Restore FDir SB capability
8339 * @pf: board private structure
8341 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8343 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8344 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8345 (I40E_DEBUG_FD & pf->hw.debug_mask))
8346 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8350 * i40e_reenable_fdir_atr - Restore FDir ATR capability
8351 * @pf: board private structure
8353 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8355 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8356 /* ATR uses the same filtering logic as SB rules. It only
8357 * functions properly if the input set mask is at the default
8358 * settings. It is safe to restore the default input set
8359 * because there are no active TCPv4 filter rules.
8361 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8362 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8363 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8365 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8366 (I40E_DEBUG_FD & pf->hw.debug_mask))
8367 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8372 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8373 * @pf: board private structure
8374 * @filter: FDir filter to remove
8376 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8377 struct i40e_fdir_filter *filter)
8379 /* Update counters */
8380 pf->fdir_pf_active_filters--;
8383 switch (filter->flow_type) {
8385 pf->fd_tcp4_filter_cnt--;
8388 pf->fd_udp4_filter_cnt--;
8391 pf->fd_sctp4_filter_cnt--;
8394 switch (filter->ip4_proto) {
8396 pf->fd_tcp4_filter_cnt--;
8399 pf->fd_udp4_filter_cnt--;
8402 pf->fd_sctp4_filter_cnt--;
8405 pf->fd_ip4_filter_cnt--;
8411 /* Remove the filter from the list and free memory */
8412 hlist_del(&filter->fdir_node);
8417 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8418 * @pf: board private structure
8420 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8422 struct i40e_fdir_filter *filter;
8423 u32 fcnt_prog, fcnt_avail;
8424 struct hlist_node *node;
8426 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8429 /* Check if we have enough room to re-enable FDir SB capability. */
8430 fcnt_prog = i40e_get_global_fd_count(pf);
8431 fcnt_avail = pf->fdir_pf_filter_count;
8432 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8433 (pf->fd_add_err == 0) ||
8434 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8435 i40e_reenable_fdir_sb(pf);
8437 /* We should wait for even more space before re-enabling ATR.
8438 * Additionally, we cannot enable ATR as long as we still have TCP SB rules. */
8441 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8442 (pf->fd_tcp4_filter_cnt == 0))
8443 i40e_reenable_fdir_atr(pf);
8445 /* if hw had a problem adding a filter, delete it */
8446 if (pf->fd_inv > 0) {
8447 hlist_for_each_entry_safe(filter, node,
8448 &pf->fdir_filter_list, fdir_node)
8449 if (filter->fd_id == pf->fd_inv)
8450 i40e_delete_invalid_filter(pf, filter);
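/* Flush timing thresholds, in seconds (multiplied by HZ when compared against jiffies) */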
8454 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8455 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8457 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8458 * @pf: board private structure
8460 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8462 unsigned long min_flush_time;
8463 int flush_wait_retry = 50;
8464 bool disable_atr = false;
8468 if (!time_after(jiffies, pf->fd_flush_timestamp +
8469 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8472 /* If the flush is happening too quickly and we have mostly SB rules, we
8473 * should not re-enable ATR for some time.
8475 min_flush_time = pf->fd_flush_timestamp +
8476 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8477 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8479 if (!(time_after(jiffies, min_flush_time)) &&
8480 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8481 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8482 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8486 pf->fd_flush_timestamp = jiffies;
8487 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8488 /* flush all filters */
8489 wr32(&pf->hw, I40E_PFQF_CTL_1,
8490 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8491 i40e_flush(&pf->hw);
8495 /* Check FD flush status every 5-6msec */
8496 usleep_range(5000, 6000);
8497 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8498 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8500 } while (flush_wait_retry--);
8501 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8502 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8504 /* replay sideband filters */
8505 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8506 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8507 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8508 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8509 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8510 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8515 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8516 * @pf: board private structure
8518 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8520 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8523 /* We can see up to 256 filter programming descriptors in transit if the filters
8524 * are being applied really fast, before we see the first
8525 * filter miss error on Rx queue 0. Accumulating enough error messages before
8526 * reacting will make sure we don't trigger a flush too often.
8528 #define I40E_MAX_FD_PROGRAM_ERROR 256
8531 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8532 * @pf: board private structure
8534 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8537 /* if interface is down do nothing */
8538 if (test_bit(__I40E_DOWN, pf->state))
8541 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8542 i40e_fdir_flush_and_replay(pf);
8544 i40e_fdir_check_and_reenable(pf);
8549 * i40e_vsi_link_event - notify VSI of a link event
8550 * @vsi: vsi to be notified
8551 * @link_up: link up or down
8553 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8555 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8558 switch (vsi->type) {
8560 if (!vsi->netdev || !vsi->netdev_registered)
8564 netif_carrier_on(vsi->netdev);
8565 netif_tx_wake_all_queues(vsi->netdev);
8567 netif_carrier_off(vsi->netdev);
8568 netif_tx_stop_all_queues(vsi->netdev);
8572 case I40E_VSI_SRIOV:
8573 case I40E_VSI_VMDQ2:
8575 case I40E_VSI_IWARP:
8576 case I40E_VSI_MIRROR:
8578 /* there is no notification for other VSIs */
8584 * i40e_veb_link_event - notify elements on the veb of a link event
8585 * @veb: veb to be notified
8586 * @link_up: link up or down
8588 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8593 if (!veb || !veb->pf)
8597 /* depth first... */
8598 for (i = 0; i < I40E_MAX_VEB; i++)
8599 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8600 i40e_veb_link_event(pf->veb[i], link_up);
8602 /* ... now the local VSIs */
8603 for (i = 0; i < pf->num_alloc_vsi; i++)
8604 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8605 i40e_vsi_link_event(pf->vsi[i], link_up);
8609 * i40e_link_event - Update netif_carrier status
8610 * @pf: board private structure
8612 static void i40e_link_event(struct i40e_pf *pf)
8614 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8615 u8 new_link_speed, old_link_speed;
8617 bool new_link, old_link;
8619 /* save off old link status information */
8620 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
8622 /* set this to force the get_link_status call to refresh state */
8623 pf->hw.phy.get_link_info = true;
8625 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8627 status = i40e_get_link_status(&pf->hw, &new_link);
8629 /* On success, disable temp link polling */
8630 if (status == I40E_SUCCESS) {
8631 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8633 /* Enable link polling temporarily until i40e_get_link_status
8634 * returns I40E_SUCCESS
8636 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8637 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8642 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8643 new_link_speed = pf->hw.phy.link_info.link_speed;
8645 if (new_link == old_link &&
8646 new_link_speed == old_link_speed &&
8647 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8648 new_link == netif_carrier_ok(vsi->netdev)))
8651 i40e_print_link_message(vsi, new_link);
8653 /* Notify the base of the switch tree connected to
8654 * the link. Floating VEBs are not notified.
8656 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8657 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8659 i40e_vsi_link_event(vsi, new_link);
8662 i40e_vc_notify_link_state(pf);
8664 if (pf->flags & I40E_FLAG_PTP)
8665 i40e_ptp_set_increment(pf);
8669 * i40e_watchdog_subtask - periodic checks not using event driven response
8670 * @pf: board private structure
8672 static void i40e_watchdog_subtask(struct i40e_pf *pf)
8676 /* if interface is down do nothing */
8677 if (test_bit(__I40E_DOWN, pf->state) ||
8678 test_bit(__I40E_CONFIG_BUSY, pf->state))
8681 /* make sure we don't do these things too often */
8682 if (time_before(jiffies, (pf->service_timer_previous +
8683 pf->service_timer_period)))
8685 pf->service_timer_previous = jiffies;
8687 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8688 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
8689 i40e_link_event(pf);
8691 /* Update the stats for active netdevs so the network stack
8692 * can look at updated numbers whenever it cares to
8694 for (i = 0; i < pf->num_alloc_vsi; i++)
8695 if (pf->vsi[i] && pf->vsi[i]->netdev)
8696 i40e_update_stats(pf->vsi[i]);
8698 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8699 /* Update the stats for the active switching components */
8700 for (i = 0; i < I40E_MAX_VEB; i++)
8702 i40e_update_veb_stats(pf->veb[i]);
8705 i40e_ptp_rx_hang(pf);
8706 i40e_ptp_tx_hang(pf);
8710 * i40e_reset_subtask - Set up for resetting the device and driver
8711 * @pf: board private structure
8713 static void i40e_reset_subtask(struct i40e_pf *pf)
8715 u32 reset_flags = 0;
8717 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8718 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8719 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8721 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8722 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8723 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8725 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8726 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8727 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8729 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8730 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8731 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8733 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8734 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8735 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8738 /* If there's a recovery already waiting, it takes
8739 * precedence before starting a new reset sequence.
8741 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8742 i40e_prep_for_reset(pf, false);
8744 i40e_rebuild(pf, false, false);
8747 /* If we're already down or resetting, just bail */
8749 !test_bit(__I40E_DOWN, pf->state) &&
8750 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8751 i40e_do_reset(pf, reset_flags, false);
8756 * i40e_handle_link_event - Handle link event
8757 * @pf: board private structure
8758 * @e: event info posted on ARQ
8760 static void i40e_handle_link_event(struct i40e_pf *pf,
8761 struct i40e_arq_event_info *e)
8763 struct i40e_aqc_get_link_status *status =
8764 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8766 /* Do a new status request to re-enable LSE reporting
8767 * and load new status information into the hw struct.
8768 * This completely ignores any state information
8769 * in the ARQ event info, instead choosing to always
8770 * issue the AQ update link status command.
8772 i40e_link_event(pf);
8774 /* Check if module meets thermal requirements */
8775 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8776 dev_err(&pf->pdev->dev,
8777 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8778 dev_err(&pf->pdev->dev,
8779 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8781 /* check for an unqualified module if the link is down, but suppress
8782 * the message if the link was forced down.
8784 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8785 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8786 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8787 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8788 dev_err(&pf->pdev->dev,
8789 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8790 dev_err(&pf->pdev->dev,
8791 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8797 * i40e_clean_adminq_subtask - Clean the AdminQ rings
8798 * @pf: board private structure
8800 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8802 struct i40e_arq_event_info event;
8803 struct i40e_hw *hw = &pf->hw;
8810 /* Do not run clean AQ when PF reset fails */
8811 if (test_bit(__I40E_RESET_FAILED, pf->state))
8814 /* check for error indications */
8815 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8817 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8818 if (hw->debug_mask & I40E_DEBUG_AQ)
8819 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8820 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8822 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8823 if (hw->debug_mask & I40E_DEBUG_AQ)
8824 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8825 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8826 pf->arq_overflows++;
8828 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8829 if (hw->debug_mask & I40E_DEBUG_AQ)
8830 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8831 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8834 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8836 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8838 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8839 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8840 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8841 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8843 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8844 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8845 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8846 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8848 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8849 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8850 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8851 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8854 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8856 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8857 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8862 ret = i40e_clean_arq_element(hw, &event, &pending);
8863 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8866 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8870 opcode = le16_to_cpu(event.desc.opcode);
8873 case i40e_aqc_opc_get_link_status:
8874 i40e_handle_link_event(pf, &event);
8876 case i40e_aqc_opc_send_msg_to_pf:
8877 ret = i40e_vc_process_vf_msg(pf,
8878 le16_to_cpu(event.desc.retval),
8879 le32_to_cpu(event.desc.cookie_high),
8880 le32_to_cpu(event.desc.cookie_low),
8884 case i40e_aqc_opc_lldp_update_mib:
8885 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8886 #ifdef CONFIG_I40E_DCB
8888 ret = i40e_handle_lldp_event(pf, &event);
8890 #endif /* CONFIG_I40E_DCB */
8892 case i40e_aqc_opc_event_lan_overflow:
8893 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8894 i40e_handle_lan_overflow_event(pf, &event);
8896 case i40e_aqc_opc_send_msg_to_peer:
8897 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8899 case i40e_aqc_opc_nvm_erase:
8900 case i40e_aqc_opc_nvm_update:
8901 case i40e_aqc_opc_oem_post_update:
8902 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8903 "ARQ NVM operation 0x%04x completed\n",
8907 dev_info(&pf->pdev->dev,
8908 "ARQ: Unknown event 0x%04x ignored\n",
8912 } while (i++ < pf->adminq_work_limit);
8914 if (i < pf->adminq_work_limit)
8915 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8917 /* re-enable Admin queue interrupt cause */
8918 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8919 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8920 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8923 kfree(event.msg_buf);
8927 * i40e_verify_eeprom - make sure eeprom is good to use
8928 * @pf: board private structure
8930 static void i40e_verify_eeprom(struct i40e_pf *pf)
8934 err = i40e_diag_eeprom_test(&pf->hw);
8936 /* retry in case of garbage read */
8937 err = i40e_diag_eeprom_test(&pf->hw);
8939 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8941 set_bit(__I40E_BAD_EEPROM, pf->state);
8945 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8946 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8947 clear_bit(__I40E_BAD_EEPROM, pf->state);
8952 * i40e_enable_pf_switch_lb
8953 * @pf: pointer to the PF structure
8955 * enable switch loop back or die - no point in a return value
8957 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8959 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8960 struct i40e_vsi_context ctxt;
8963 ctxt.seid = pf->main_vsi_seid;
8964 ctxt.pf_num = pf->hw.pf_id;
8966 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8968 dev_info(&pf->pdev->dev,
8969 "couldn't get PF vsi config, err %s aq_err %s\n",
8970 i40e_stat_str(&pf->hw, ret),
8971 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8974 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8975 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8976 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8978 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8980 dev_info(&pf->pdev->dev,
8981 "update vsi switch failed, err %s aq_err %s\n",
8982 i40e_stat_str(&pf->hw, ret),
8983 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8988 * i40e_disable_pf_switch_lb
8989 * @pf: pointer to the PF structure
8991 * disable switch loop back or die - no point in a return value
8993 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8995 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8996 struct i40e_vsi_context ctxt;
8999 ctxt.seid = pf->main_vsi_seid;
9000 ctxt.pf_num = pf->hw.pf_id;
9002 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
9004 dev_info(&pf->pdev->dev,
9005 "couldn't get PF vsi config, err %s aq_err %s\n",
9006 i40e_stat_str(&pf->hw, ret),
9007 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9010 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
9011 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
9012 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
9014 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
9016 dev_info(&pf->pdev->dev,
9017 "update vsi switch failed, err %s aq_err %s\n",
9018 i40e_stat_str(&pf->hw, ret),
9019 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9024 * i40e_config_bridge_mode - Configure the HW bridge mode
9025 * @veb: pointer to the bridge instance
9027 * Configure the loop back mode for the LAN VSI that is downlink to the
9028 * specified HW bridge instance. It is expected this function is called
9029 * when a new HW bridge is instantiated.
9031 static void i40e_config_bridge_mode(struct i40e_veb *veb)
9033 struct i40e_pf *pf = veb->pf;
9035 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
9036 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
9037 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
9038 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
9039 i40e_disable_pf_switch_lb(pf);
9041 i40e_enable_pf_switch_lb(pf);
9045 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
9046 * @veb: pointer to the VEB instance
9048 * This is a recursive function that first builds the attached VSIs then
9049 * recurses in to build the next layer of VEB. We track the connections
9050 * through our own index numbers because the SEIDs from the HW could
9051 * change across the reset.
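 *
 * A sketch of the order implemented below: the owner VSI of this VEB is
 * re-added first, then the VEB itself is re-created in the switch, then the
 * remaining member VSIs are re-added, and finally any child VEBs are rebuilt
 * by recursion.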
9053 static int i40e_reconstitute_veb(struct i40e_veb *veb)
9055 struct i40e_vsi *ctl_vsi = NULL;
9056 struct i40e_pf *pf = veb->pf;
9060 /* build VSI that owns this VEB, temporarily attached to base VEB */
9061 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
9063 pf->vsi[v]->veb_idx == veb->idx &&
9064 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
9065 ctl_vsi = pf->vsi[v];
9070 dev_info(&pf->pdev->dev,
9071 "missing owner VSI for veb_idx %d\n", veb->idx);
9073 goto end_reconstitute;
9075 if (ctl_vsi != pf->vsi[pf->lan_vsi])
9076 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9077 ret = i40e_add_vsi(ctl_vsi);
9079 dev_info(&pf->pdev->dev,
9080 "rebuild of veb_idx %d owner VSI failed: %d\n",
9082 goto end_reconstitute;
9084 i40e_vsi_reset_stats(ctl_vsi);
9086 /* create the VEB in the switch and move the VSI onto the VEB */
9087 ret = i40e_add_veb(veb, ctl_vsi);
9089 goto end_reconstitute;
9091 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9092 veb->bridge_mode = BRIDGE_MODE_VEB;
9094 veb->bridge_mode = BRIDGE_MODE_VEPA;
9095 i40e_config_bridge_mode(veb);
9097 /* create the remaining VSIs attached to this VEB */
9098 for (v = 0; v < pf->num_alloc_vsi; v++) {
9099 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9102 if (pf->vsi[v]->veb_idx == veb->idx) {
9103 struct i40e_vsi *vsi = pf->vsi[v];
9105 vsi->uplink_seid = veb->seid;
9106 ret = i40e_add_vsi(vsi);
9108 dev_info(&pf->pdev->dev,
9109 "rebuild of vsi_idx %d failed: %d\n",
9111 goto end_reconstitute;
9113 i40e_vsi_reset_stats(vsi);
9117 /* create any VEBs attached to this VEB - RECURSION */
9118 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9119 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9120 pf->veb[veb_idx]->uplink_seid = veb->seid;
9121 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9132 * i40e_get_capabilities - get info about the HW
9133 * @pf: the PF struct
9135 static int i40e_get_capabilities(struct i40e_pf *pf,
9136 enum i40e_admin_queue_opc list_type)
9138 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9143 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9145 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9149 /* this loads the data into the hw struct for us */
9150 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9151 &data_size, list_type,
9153 /* data loaded, buffer no longer needed */
9156 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9157 /* retry with a larger buffer */
9158 buf_len = data_size;
9159 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
9160 dev_info(&pf->pdev->dev,
9161 "capability discovery failed, err %s aq_err %s\n",
9162 i40e_stat_str(&pf->hw, err),
9163 i40e_aq_str(&pf->hw,
9164 pf->hw.aq.asq_last_status));
9169 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9170 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9171 dev_info(&pf->pdev->dev,
9172 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9173 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9174 pf->hw.func_caps.num_msix_vectors,
9175 pf->hw.func_caps.num_msix_vectors_vf,
9176 pf->hw.func_caps.fd_filters_guaranteed,
9177 pf->hw.func_caps.fd_filters_best_effort,
9178 pf->hw.func_caps.num_tx_qp,
9179 pf->hw.func_caps.num_vsis);
9180 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9181 dev_info(&pf->pdev->dev,
9182 "switch_mode=0x%04x, function_valid=0x%08x\n",
9183 pf->hw.dev_caps.switch_mode,
9184 pf->hw.dev_caps.valid_functions);
9185 dev_info(&pf->pdev->dev,
9186 "SR-IOV=%d, num_vfs for all function=%u\n",
9187 pf->hw.dev_caps.sr_iov_1_1,
9188 pf->hw.dev_caps.num_vfs);
9189 dev_info(&pf->pdev->dev,
9190 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9191 pf->hw.dev_caps.num_vsis,
9192 pf->hw.dev_caps.num_rx_qp,
9193 pf->hw.dev_caps.num_tx_qp);
9196 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9197 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9198 + pf->hw.func_caps.num_vfs)
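/* Worked example with hypothetical numbers: with no FCoE function and 32
 * VFs, DEF_NUM_VSI = 1 + 0 + 32 = 33, so a revision_id 0 part reporting
 * fewer than 33 VSIs is bumped up to 33 by the check below.
 */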
9199 if (pf->hw.revision_id == 0 &&
9200 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9201 dev_info(&pf->pdev->dev,
9202 "got num_vsis %d, setting num_vsis to %d\n",
9203 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9204 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9210 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9213 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9214 * @pf: board private structure
9216 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9218 struct i40e_vsi *vsi;
9220 /* quick workaround for an NVM issue that leaves a critical register
9223 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9224 static const u32 hkey[] = {
9225 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9226 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9227 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9231 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9232 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9235 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9238 /* find existing VSI and see if it needs configuring */
9239 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9241 /* create a new VSI if none exists */
9243 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9244 pf->vsi[pf->lan_vsi]->seid, 0);
9246 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9247 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9248 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9253 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9257 * i40e_fdir_teardown - release the Flow Director resources
9258 * @pf: board private structure
9260 static void i40e_fdir_teardown(struct i40e_pf *pf)
9262 struct i40e_vsi *vsi;
9264 i40e_fdir_filter_exit(pf);
9265 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9267 i40e_vsi_release(vsi);
9271 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9273 * @seid: seid of main or channel VSIs
9275 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9276 * existed before reset
9278 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9280 struct i40e_cloud_filter *cfilter;
9281 struct i40e_pf *pf = vsi->back;
9282 struct hlist_node *node;
9285 /* Add cloud filters back if they exist */
9286 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9288 if (cfilter->seid != seid)
9291 if (cfilter->dst_port)
9292 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9295 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9298 dev_dbg(&pf->pdev->dev,
9299 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9300 i40e_stat_str(&pf->hw, ret),
9301 i40e_aq_str(&pf->hw,
9302 pf->hw.aq.asq_last_status));
9310 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9313 * Rebuilds channel VSIs if they existed before reset
9315 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9317 struct i40e_channel *ch, *ch_tmp;
9320 if (list_empty(&vsi->ch_list))
9323 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9324 if (!ch->initialized)
9326 /* Proceed with creation of channel (VMDq2) VSI */
9327 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9329 dev_info(&vsi->back->pdev->dev,
9330 "failed to rebuild channels using uplink_seid %u\n",
9334 /* Reconfigure TX queues using QTX_CTL register */
9335 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9337 dev_info(&vsi->back->pdev->dev,
9338 "failed to configure TX rings for channel %u\n",
9342 /* update 'next_base_queue' */
9343 vsi->next_base_queue = vsi->next_base_queue +
9344 ch->num_queue_pairs;
9345 if (ch->max_tx_rate) {
9346 u64 credits = ch->max_tx_rate;
9348 if (i40e_set_bw_limit(vsi, ch->seid,
9352 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9353 dev_dbg(&vsi->back->pdev->dev,
9354 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9359 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9361 dev_dbg(&vsi->back->pdev->dev,
9362 "Failed to rebuild cloud filters for channel VSI %u\n",
9371 * i40e_clean_xps_state - clean xps state for every tx_ring
9372 * @vsi: ptr to the VSI
9374 static void i40e_clean_xps_state(struct i40e_vsi *vsi)
9379 for (i = 0; i < vsi->num_queue_pairs; i++)
9380 if (vsi->tx_rings[i])
9381 clear_bit(__I40E_TX_XPS_INIT_DONE,
9382 vsi->tx_rings[i]->state);
9386 * i40e_prep_for_reset - prep for the core to reset
9387 * @pf: board private structure
9388 * @lock_acquired: indicates whether or not the lock has been acquired
9389 * before this function was called.
9391 * Close up the VFs and other things in prep for PF Reset.
9393 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9395 struct i40e_hw *hw = &pf->hw;
9396 i40e_status ret = 0;
9399 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9400 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9402 if (i40e_check_asq_alive(&pf->hw))
9403 i40e_vc_notify_reset(pf);
9405 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9407 /* quiesce the VSIs and their queues that are not already DOWN */
9408 /* pf_quiesce_all_vsi modifies netdev structures - rtnl_lock needed */
9411 i40e_pf_quiesce_all_vsi(pf);
9415 for (v = 0; v < pf->num_alloc_vsi; v++) {
9417 i40e_clean_xps_state(pf->vsi[v]);
9418 pf->vsi[v]->seid = 0;
9422 i40e_shutdown_adminq(&pf->hw);
9424 /* call shutdown HMC */
9425 if (hw->hmc.hmc_obj) {
9426 ret = i40e_shutdown_lan_hmc(hw);
9428 dev_warn(&pf->pdev->dev,
9429 "shutdown_lan_hmc failed: %d\n", ret);
9434 * i40e_send_version - update firmware with driver version
9437 static void i40e_send_version(struct i40e_pf *pf)
9439 struct i40e_driver_version dv;
9441 dv.major_version = DRV_VERSION_MAJOR;
9442 dv.minor_version = DRV_VERSION_MINOR;
9443 dv.build_version = DRV_VERSION_BUILD;
9444 dv.subbuild_version = 0;
9445 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9446 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9450 * i40e_get_oem_version - get OEM specific version information
9451 * @hw: pointer to the hardware structure
9453 static void i40e_get_oem_version(struct i40e_hw *hw)
9455 u16 block_offset = 0xffff;
9456 u16 block_length = 0;
9457 u16 capabilities = 0;
9461 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9462 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9463 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9464 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9465 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9466 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9467 #define I40E_NVM_OEM_LENGTH 3
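/* The reads below assume the following OEM version block layout: word 0
 * holds the block length, word 1 the capabilities word, word 2 the gen/snap
 * value and word 3 the release value; oem_ver is then assembled as
 * (gen_snap << I40E_OEM_SNAP_SHIFT) | release.
 */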
9469 /* Check if pointer to OEM version block is valid. */
9470 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9471 if (block_offset == 0xffff)
9474 /* Check if OEM version block has correct length. */
9475 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9477 if (block_length < I40E_NVM_OEM_LENGTH)
9480 /* Check if OEM version format is as expected. */
9481 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9483 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9486 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9488 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9490 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9491 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9495 * i40e_reset - wait for core reset to finish, reset the PF if corer not seen
9496 * @pf: board private structure
9498 static int i40e_reset(struct i40e_pf *pf)
9500 struct i40e_hw *hw = &pf->hw;
9503 ret = i40e_pf_reset(hw);
9505 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9506 set_bit(__I40E_RESET_FAILED, pf->state);
9507 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9515 * i40e_rebuild - rebuild using a saved config
9516 * @pf: board private structure
9517 * @reinit: if the Main VSI needs to be re-initialized.
9518 * @lock_acquired: indicates whether or not the lock has been acquired
9519 * before this function was called.
9521 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9523 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9524 struct i40e_hw *hw = &pf->hw;
9529 if (test_bit(__I40E_DOWN, pf->state))
9530 goto clear_recovery;
9531 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9533 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9534 ret = i40e_init_adminq(&pf->hw);
9536 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9537 i40e_stat_str(&pf->hw, ret),
9538 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9539 goto clear_recovery;
9541 i40e_get_oem_version(&pf->hw);
9543 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
9544 /* The following delay is necessary for firmware update. */
9548 /* re-verify the eeprom if we just had an EMP reset */
9549 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9550 i40e_verify_eeprom(pf);
9552 i40e_clear_pxe_mode(hw);
9553 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9555 goto end_core_reset;
9557 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9558 hw->func_caps.num_rx_qp, 0, 0);
9560 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9561 goto end_core_reset;
9563 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9565 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9566 goto end_core_reset;
9569 /* Enable FW to write a default DCB config on link-up */
9570 i40e_aq_set_dcb_parameters(hw, true, NULL);
9572 #ifdef CONFIG_I40E_DCB
9573 ret = i40e_init_pf_dcb(pf);
9575 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9576 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9577 /* Continue without DCB enabled */
9579 #endif /* CONFIG_I40E_DCB */
9580 /* do basic switch setup */
9583 ret = i40e_setup_pf_switch(pf, reinit);
9587 /* The driver only wants link up/down, media availability, and module qualification
9588 * reports from firmware. Note the negative logic.
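 * Presumably a set bit in this mask suppresses the corresponding event, so
 * passing the complement of the three events below leaves only those events
 * enabled.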
9590 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9591 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9592 I40E_AQ_EVENT_MEDIA_NA |
9593 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9595 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9596 i40e_stat_str(&pf->hw, ret),
9597 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9599 /* Rebuild the VSIs and VEBs that existed before reset.
9600 * They are still in our local switch element arrays, so only
9601 * need to rebuild the switch model in the HW.
9603 * If there were VEBs but the reconstitution failed, we'll try
9604 * to recover minimal use by getting the basic PF VSI working.
9606 if (vsi->uplink_seid != pf->mac_seid) {
9607 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9608 /* find the one VEB connected to the MAC, and find orphans */
9609 for (v = 0; v < I40E_MAX_VEB; v++) {
9613 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9614 pf->veb[v]->uplink_seid == 0) {
9615 ret = i40e_reconstitute_veb(pf->veb[v]);
9620 /* If Main VEB failed, we're in deep doodoo,
9621 * so give up rebuilding the switch and set up
9622 * for minimal rebuild of PF VSI.
9623 * If orphan failed, we'll report the error
9624 * but try to keep going.
9626 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9627 dev_info(&pf->pdev->dev,
9628 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9630 vsi->uplink_seid = pf->mac_seid;
9632 } else if (pf->veb[v]->uplink_seid == 0) {
9633 dev_info(&pf->pdev->dev,
9634 "rebuild of orphan VEB failed: %d\n",
9641 if (vsi->uplink_seid == pf->mac_seid) {
9642 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9643 /* no VEB, so rebuild only the Main VSI */
9644 ret = i40e_add_vsi(vsi);
9646 dev_info(&pf->pdev->dev,
9647 "rebuild of Main VSI failed: %d\n", ret);
9652 if (vsi->mqprio_qopt.max_rate[0]) {
9653 u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
9654 vsi->mqprio_qopt.max_rate[0]);
9657 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9661 credits = max_tx_rate;
9662 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9663 dev_dbg(&vsi->back->pdev->dev,
9664 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9670 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9674 /* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
9675 * for this main VSI if they exist
9677 ret = i40e_rebuild_channels(vsi);
9681 /* Reconfigure hardware for allowing smaller MSS in the case
9682 * of TSO, so that we avoid the MDD being fired and causing
9683 * a reset in the case of small MSS+TSO.
9685 #define I40E_REG_MSS 0x000E64DC
9686 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
9687 #define I40E_64BYTE_MSS 0x400000
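/* Sanity check on the constants above: I40E_REG_MSS_MIN_MASK selects bits
 * 16..25 of the register, and I40E_64BYTE_MSS is 0x40 << 16, i.e. a minimum
 * MSS of 64 bytes, which is what gets programmed below whenever the current
 * minimum is larger.
 */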
9688 val = rd32(hw, I40E_REG_MSS);
9689 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9690 val &= ~I40E_REG_MSS_MIN_MASK;
9691 val |= I40E_64BYTE_MSS;
9692 wr32(hw, I40E_REG_MSS, val);
9695 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9697 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9699 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9700 i40e_stat_str(&pf->hw, ret),
9701 i40e_aq_str(&pf->hw,
9702 pf->hw.aq.asq_last_status));
9704 /* reinit the misc interrupt */
9705 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9706 ret = i40e_setup_misc_vector(pf);
9711 /* Add a filter to drop all Flow Control frames from any VSI, preventing
9712 * them from being transmitted. By doing so we stop a malicious VF from sending out
9713 * PAUSE or PFC frames and potentially controlling traffic for other
9715 * The FW can still send Flow control frames if enabled.
9717 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9720 /* restart the VSIs that were rebuilt and running before the reset */
9721 i40e_pf_unquiesce_all_vsi(pf);
9723 /* Release the RTNL lock before we start resetting VFs */
9727 /* Restore promiscuous settings */
9728 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9730 dev_warn(&pf->pdev->dev,
9731 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9732 pf->cur_promisc ? "on" : "off",
9733 i40e_stat_str(&pf->hw, ret),
9734 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9736 i40e_reset_all_vfs(pf, true);
9738 /* tell the firmware that we're starting */
9739 i40e_send_version(pf);
9741 /* We've already released the lock, so don't do it again */
9742 goto end_core_reset;
9748 clear_bit(__I40E_RESET_FAILED, pf->state);
9750 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9751 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
9755 * i40e_reset_and_rebuild - reset and rebuild using a saved config
9756 * @pf: board private structure
9757 * @reinit: if the Main VSI needs to be re-initialized.
9758 * @lock_acquired: indicates whether or not the lock has been acquired
9759 * before this function was called.
9761 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9765 /* Now we wait for GRST to settle out.
9766 * We don't have to delete the VEBs or VSIs from the hw switch
9767 * because the reset will make them disappear.
9769 ret = i40e_reset(pf);
9771 i40e_rebuild(pf, reinit, lock_acquired);
9775 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
9776 * @pf: board private structure
9778 * @lock_acquired: indicates whether or not the lock has been acquired
9779 * before this function was called.
9780 * Close up the VFs and other things in prep for a Core Reset,
9781 * then get ready to rebuild the world.
9783 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9785 i40e_prep_for_reset(pf, lock_acquired);
9786 i40e_reset_and_rebuild(pf, false, lock_acquired);
9790 * i40e_handle_mdd_event
9791 * @pf: pointer to the PF structure
9793 * Called from the MDD irq handler to identify possibly malicious VFs
9795 static void i40e_handle_mdd_event(struct i40e_pf *pf)
9797 struct i40e_hw *hw = &pf->hw;
9798 bool mdd_detected = false;
9799 bool pf_mdd_detected = false;
9804 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9807 /* find what triggered the MDD event */
9808 reg = rd32(hw, I40E_GL_MDET_TX);
9809 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9810 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9811 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9812 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9813 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9814 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9815 I40E_GL_MDET_TX_EVENT_SHIFT;
9816 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9817 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9818 pf->hw.func_caps.base_queue;
9819 if (netif_msg_tx_err(pf))
9820 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9821 event, queue, pf_num, vf_num);
9822 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9823 mdd_detected = true;
9825 reg = rd32(hw, I40E_GL_MDET_RX);
9826 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9827 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9828 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9829 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9830 I40E_GL_MDET_RX_EVENT_SHIFT;
9831 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9832 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9833 pf->hw.func_caps.base_queue;
9834 if (netif_msg_rx_err(pf))
9835 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9836 event, queue, func);
9837 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9838 mdd_detected = true;
9842 reg = rd32(hw, I40E_PF_MDET_TX);
9843 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9844 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9845 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9846 pf_mdd_detected = true;
9848 reg = rd32(hw, I40E_PF_MDET_RX);
9849 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9850 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9851 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9852 pf_mdd_detected = true;
9854 /* Queue belongs to the PF, initiate a reset */
9855 if (pf_mdd_detected) {
9856 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9857 i40e_service_event_schedule(pf);
9861 /* see if one of the VFs needs its hand slapped */
9862 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9864 reg = rd32(hw, I40E_VP_MDET_TX(i));
9865 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9866 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9867 vf->num_mdd_events++;
9868 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9872 reg = rd32(hw, I40E_VP_MDET_RX(i));
9873 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9874 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9875 vf->num_mdd_events++;
9876 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9880 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9881 dev_info(&pf->pdev->dev,
9882 "Too many MDD events on VF %d, disabled\n", i);
9883 dev_info(&pf->pdev->dev,
9884 "Use PF Control I/F to re-enable the VF\n");
9885 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9889 /* re-enable mdd interrupt cause */
9890 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9891 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9892 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9893 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9897 static const char *i40e_tunnel_name(u8 type)
9900 case UDP_TUNNEL_TYPE_VXLAN:
9902 case UDP_TUNNEL_TYPE_GENEVE:
9910 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9911 * @pf: board private structure
9913 static void i40e_sync_udp_filters(struct i40e_pf *pf)
9917 /* loop through and set pending bit for all active UDP filters */
9918 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9919 if (pf->udp_ports[i].port)
9920 pf->pending_udp_bitmap |= BIT_ULL(i);
9923 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
9927 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9928 * @pf: board private structure
9930 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9932 struct i40e_hw *hw = &pf->hw;
9933 u8 filter_index, type;
9937 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9940 /* acquire RTNL to maintain state of flags and port requests */
9943 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9944 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9945 struct i40e_udp_port_config *udp_port;
9946 i40e_status ret = 0;
9948 udp_port = &pf->udp_ports[i];
9949 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9951 port = READ_ONCE(udp_port->port);
9952 type = READ_ONCE(udp_port->type);
9953 filter_index = READ_ONCE(udp_port->filter_index);
9955 /* release RTNL while we wait on AQ command */
9959 ret = i40e_aq_add_udp_tunnel(hw, port,
9963 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9964 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9967 /* reacquire RTNL so we can update filter_index */
9971 dev_info(&pf->pdev->dev,
9972 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9973 i40e_tunnel_name(type),
9974 port ? "add" : "delete",
9977 i40e_stat_str(&pf->hw, ret),
9978 i40e_aq_str(&pf->hw,
9979 pf->hw.aq.asq_last_status));
9981 /* failed to add, just reset port,
9982 * drop pending bit for any deletion
9985 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9988 /* record filter index on success */
9989 udp_port->filter_index = filter_index;
9998 * i40e_service_task - Run the driver's async subtasks
9999 * @work: pointer to work_struct containing our data
10001 static void i40e_service_task(struct work_struct *work)
10003 struct i40e_pf *pf = container_of(work,
10006 unsigned long start_time = jiffies;
10008 /* don't bother with service tasks if a reset is in progress */
10009 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
10012 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
10015 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
10016 i40e_sync_filters_subtask(pf);
10017 i40e_reset_subtask(pf);
10018 i40e_handle_mdd_event(pf);
10019 i40e_vc_process_vflr_event(pf);
10020 i40e_watchdog_subtask(pf);
10021 i40e_fdir_reinit_subtask(pf);
10022 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
10023 /* Client subtask will reopen next time through. */
10024 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
10026 i40e_client_subtask(pf);
10027 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
10029 i40e_notify_client_of_l2_param_changes(
10030 pf->vsi[pf->lan_vsi]);
10032 i40e_sync_filters_subtask(pf);
10033 i40e_sync_udp_filters_subtask(pf);
10034 i40e_clean_adminq_subtask(pf);
10036 /* flush memory to make sure state is correct before next watchdog */
10037 smp_mb__before_atomic();
10038 clear_bit(__I40E_SERVICE_SCHED, pf->state);
10040 /* If the tasks have taken longer than one timer cycle or there
10041 * is more work to be done, reschedule the service task now
10042 * rather than wait for the timer to tick again.
10044 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
10045 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
10046 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
10047 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
10048 i40e_service_event_schedule(pf);
10052 * i40e_service_timer - timer callback
10053 * @t: pointer to the timer_list structure
10055 static void i40e_service_timer(struct timer_list *t)
10057 struct i40e_pf *pf = from_timer(pf, t, service_timer);
10059 mod_timer(&pf->service_timer,
10060 round_jiffies(jiffies + pf->service_timer_period));
10061 i40e_service_event_schedule(pf);
10065 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
10066 * @vsi: the VSI being configured
10068 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
10070 struct i40e_pf *pf = vsi->back;
10072 switch (vsi->type) {
10073 case I40E_VSI_MAIN:
10074 vsi->alloc_queue_pairs = pf->num_lan_qps;
10075 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10076 I40E_REQ_DESCRIPTOR_MULTIPLE);
10077 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10078 vsi->num_q_vectors = pf->num_lan_msix;
10080 vsi->num_q_vectors = 1;
10084 case I40E_VSI_FDIR:
10085 vsi->alloc_queue_pairs = 1;
10086 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
10087 I40E_REQ_DESCRIPTOR_MULTIPLE);
10088 vsi->num_q_vectors = pf->num_fdsb_msix;
10091 case I40E_VSI_VMDQ2:
10092 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
10093 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10094 I40E_REQ_DESCRIPTOR_MULTIPLE);
10095 vsi->num_q_vectors = pf->num_vmdq_msix;
10098 case I40E_VSI_SRIOV:
10099 vsi->alloc_queue_pairs = pf->num_vf_qps;
10100 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10101 I40E_REQ_DESCRIPTOR_MULTIPLE);
10113 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
10114 * @vsi: VSI pointer
10115 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
10117 * On error: returns error code (negative)
10118 * On success: returns 0
10120 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10122 struct i40e_ring **next_rings;
10126 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
10127 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10128 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
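/* The single allocation below is carved up in order: alloc_queue_pairs
 * Tx ring pointers first, then (only when XDP is enabled) the same number
 * of XDP Tx ring pointers, and finally the Rx ring pointers.
 */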
10129 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10130 if (!vsi->tx_rings)
10132 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10133 if (i40e_enabled_xdp_vsi(vsi)) {
10134 vsi->xdp_rings = next_rings;
10135 next_rings += vsi->alloc_queue_pairs;
10137 vsi->rx_rings = next_rings;
10139 if (alloc_qvectors) {
10140 /* allocate memory for q_vector pointers */
10141 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10142 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10143 if (!vsi->q_vectors) {
10151 kfree(vsi->tx_rings);
10156 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10157 * @pf: board private structure
10158 * @type: type of VSI
10160 * On error: returns error code (negative)
10161 * On success: returns vsi index in PF (positive)
10163 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10166 struct i40e_vsi *vsi;
10170 /* Need to protect the allocation of the VSIs at the PF level */
10171 mutex_lock(&pf->switch_mutex);
10173 /* VSI list may be fragmented if VSI creation/destruction has
10174 * been happening. We can afford to do a quick scan to look
10175 * for any free VSIs in the list.
10177 * find next empty vsi slot, looping back around if necessary
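 *
 * For example (hypothetical state): if next_vsi is 5 and every slot from 5
 * upward is occupied, the first loop below runs off the end of the array and
 * the second loop restarts the scan at slot 0, stopping before next_vsi.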
10180 while (i < pf->num_alloc_vsi && pf->vsi[i])
10182 if (i >= pf->num_alloc_vsi) {
10184 while (i < pf->next_vsi && pf->vsi[i])
10188 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10189 vsi_idx = i; /* Found one! */
10192 goto unlock_pf; /* out of VSI slots! */
10194 pf->next_vsi = ++i;
10196 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10203 set_bit(__I40E_VSI_DOWN, vsi->state);
10205 vsi->idx = vsi_idx;
10206 vsi->int_rate_limit = 0;
10207 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10208 pf->rss_table_size : 64;
10209 vsi->netdev_registered = false;
10210 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10211 hash_init(vsi->mac_filter_hash);
10212 vsi->irqs_ready = false;
10214 ret = i40e_set_num_rings_in_vsi(vsi);
10218 ret = i40e_vsi_alloc_arrays(vsi, true);
10222 /* Setup default MSIX irq handler for VSI */
10223 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10225 /* Initialize VSI lock */
10226 spin_lock_init(&vsi->mac_filter_hash_lock);
10227 pf->vsi[vsi_idx] = vsi;
10232 pf->next_vsi = i - 1;
10235 mutex_unlock(&pf->switch_mutex);
10240 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10241 * @vsi: VSI pointer
10242 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10244 *
10245 * There is no return value.
10247 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10249 /* free the ring and vector containers */
10250 if (free_qvectors) {
10251 kfree(vsi->q_vectors);
10252 vsi->q_vectors = NULL;
10254 kfree(vsi->tx_rings);
10255 vsi->tx_rings = NULL;
10256 vsi->rx_rings = NULL;
10257 vsi->xdp_rings = NULL;
10261 * i40e_clear_rss_config_user - clear the user configured RSS hash key and lookup table
10263 * @vsi: Pointer to VSI structure
10265 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10270 kfree(vsi->rss_hkey_user);
10271 vsi->rss_hkey_user = NULL;
10273 kfree(vsi->rss_lut_user);
10274 vsi->rss_lut_user = NULL;
10278 * i40e_vsi_clear - Deallocate the VSI provided
10279 * @vsi: the VSI being un-configured
10281 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10283 struct i40e_pf *pf;
10292 mutex_lock(&pf->switch_mutex);
10293 if (!pf->vsi[vsi->idx]) {
10294 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10295 vsi->idx, vsi->idx, vsi->type);
10299 if (pf->vsi[vsi->idx] != vsi) {
10300 dev_err(&pf->pdev->dev,
10301 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10302 pf->vsi[vsi->idx]->idx,
10303 pf->vsi[vsi->idx]->type,
10304 vsi->idx, vsi->type);
10308 /* updates the PF for this cleared vsi */
10309 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10310 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10312 i40e_vsi_free_arrays(vsi, true);
10313 i40e_clear_rss_config_user(vsi);
10315 pf->vsi[vsi->idx] = NULL;
10316 if (vsi->idx < pf->next_vsi)
10317 pf->next_vsi = vsi->idx;
10320 mutex_unlock(&pf->switch_mutex);
10328 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10329 * @vsi: the VSI being cleaned
10331 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10335 if (vsi->tx_rings && vsi->tx_rings[0]) {
10336 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10337 kfree_rcu(vsi->tx_rings[i], rcu);
10338 WRITE_ONCE(vsi->tx_rings[i], NULL);
10339 WRITE_ONCE(vsi->rx_rings[i], NULL);
10340 if (vsi->xdp_rings)
10341 WRITE_ONCE(vsi->xdp_rings[i], NULL);
10347 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10348 * @vsi: the VSI being configured
10350 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10352 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10353 struct i40e_pf *pf = vsi->back;
10354 struct i40e_ring *ring;
10356 /* Set basic values in the rings to be used later during open() */
10357 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10358 /* allocate space for both Tx and Rx in one shot */
10359 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
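/* The ring pointer walks through this single allocation: the Tx ring is set
 * up first, then the optional XDP Tx ring, then the Rx ring, which is why
 * each setup block below ends with ring++ except the last.
 */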
10363 ring->queue_index = i;
10364 ring->reg_idx = vsi->base_queue + i;
10365 ring->ring_active = false;
10367 ring->netdev = vsi->netdev;
10368 ring->dev = &pf->pdev->dev;
10369 ring->count = vsi->num_desc;
10372 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10373 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10374 ring->itr_setting = pf->tx_itr_default;
10375 WRITE_ONCE(vsi->tx_rings[i], ring++);
10377 if (!i40e_enabled_xdp_vsi(vsi))
10380 ring->queue_index = vsi->alloc_queue_pairs + i;
10381 ring->reg_idx = vsi->base_queue + ring->queue_index;
10382 ring->ring_active = false;
10384 ring->netdev = NULL;
10385 ring->dev = &pf->pdev->dev;
10386 ring->count = vsi->num_desc;
10389 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10390 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10391 set_ring_xdp(ring);
10392 ring->itr_setting = pf->tx_itr_default;
10393 WRITE_ONCE(vsi->xdp_rings[i], ring++);
10396 ring->queue_index = i;
10397 ring->reg_idx = vsi->base_queue + i;
10398 ring->ring_active = false;
10400 ring->netdev = vsi->netdev;
10401 ring->dev = &pf->pdev->dev;
10402 ring->count = vsi->num_desc;
10405 ring->itr_setting = pf->rx_itr_default;
10406 WRITE_ONCE(vsi->rx_rings[i], ring);
10412 i40e_vsi_clear_rings(vsi);
10417 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10418 * @pf: board private structure
10419 * @vectors: the number of MSI-X vectors to request
10421 * Returns the number of vectors reserved, or error
10423 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10425 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10426 I40E_MIN_MSIX, vectors);
10428 dev_info(&pf->pdev->dev,
10429 "MSI-X vector reservation failed: %d\n", vectors);
10437 * i40e_init_msix - Setup the MSIX capability
10438 * @pf: board private structure
10440 * Work with the OS to set up the MSIX vectors needed.
10442 * Returns the number of vectors reserved or negative on failure
10444 static int i40e_init_msix(struct i40e_pf *pf)
10446 struct i40e_hw *hw = &pf->hw;
10447 int cpus, extra_vectors;
10451 int iwarp_requested = 0;
10453 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10456 /* The number of vectors we'll request will be comprised of:
10457 * - Add 1 for "other" cause for Admin Queue events, etc.
10458 * - The number of LAN queue pairs
10459 * - Queues being used for RSS.
10460 * We don't need as many as max_rss_size vectors;
10461 * use rss_size instead in the calculation since that
10462 * is governed by the number of CPUs in the system.
10463 * - assumes symmetric Tx/Rx pairing
10464 * - The number of VMDq pairs
10465 * - The CPU count within the NUMA node if iWARP is enabled
10466 * Once we count this up, try the request.
10468 * If we can't get what we want, we'll simplify to nearly nothing
10469 * and try again. If that still fails, we punt.
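 *
 * Worked example with hypothetical numbers: 16 online CPUs and 64 MSI-X
 * vectors reported in func_caps would yield 1 misc vector,
 * min(16, 63 / 2) = 16 LAN vectors, 1 FD sideband vector, then iWARP and
 * VMDq vectors out of whatever remains, with any leftover vectors handed
 * back to the LAN queues up to the CPU count at the end.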
10471 vectors_left = hw->func_caps.num_msix_vectors;
10474 /* reserve one vector for miscellaneous handler */
10475 if (vectors_left) {
10480 /* reserve some vectors for the main PF traffic queues. Initially we
10481 * only reserve at most 50% of the available vectors, in the case that
10482 * the number of online CPUs is large. This ensures that we can enable
10483 * extra features as well. Once we've enabled the other features, we
10484 * will use any remaining vectors to reach as close as we can to the
10485 * number of online CPUs.
10487 cpus = num_online_cpus();
10488 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10489 vectors_left -= pf->num_lan_msix;
10491 /* reserve one vector for sideband flow director */
10492 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10493 if (vectors_left) {
10494 pf->num_fdsb_msix = 1;
10498 pf->num_fdsb_msix = 0;
10502 /* can we reserve enough for iWARP? */
10503 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10504 iwarp_requested = pf->num_iwarp_msix;
10507 pf->num_iwarp_msix = 0;
10508 else if (vectors_left < pf->num_iwarp_msix)
10509 pf->num_iwarp_msix = 1;
10510 v_budget += pf->num_iwarp_msix;
10511 vectors_left -= pf->num_iwarp_msix;
10514 /* any vectors left over go for VMDq support */
10515 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10516 if (!vectors_left) {
10517 pf->num_vmdq_msix = 0;
10518 pf->num_vmdq_qps = 0;
10520 int vmdq_vecs_wanted =
10521 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10523 min_t(int, vectors_left, vmdq_vecs_wanted);
10525 /* if we're short on vectors for what's desired, we limit
10526 * the queues per vmdq. If this is still more than are
10527 * available, the user will need to change the number of
10528 * queues/vectors used by the PF later with the ethtool
10531 if (vectors_left < vmdq_vecs_wanted) {
10532 pf->num_vmdq_qps = 1;
10533 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10534 vmdq_vecs = min_t(int,
10538 pf->num_vmdq_msix = pf->num_vmdq_qps;
10540 v_budget += vmdq_vecs;
10541 vectors_left -= vmdq_vecs;
10545 /* On systems with a large number of SMP cores, we previously limited
10546 * the number of vectors for num_lan_msix to be at most 50% of the
10547 * available vectors, to allow for other features. Now, we add back
10548 * the remaining vectors. However, we ensure that the total
10549 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10550 * calculate the number of vectors we can add without going over the
10551 * cap of CPUs. For systems with a small number of CPUs this will be
10554 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10555 pf->num_lan_msix += extra_vectors;
10556 vectors_left -= extra_vectors;
10558 WARN(vectors_left < 0,
10559 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10561 v_budget += pf->num_lan_msix;
10562 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10564 if (!pf->msix_entries)
10567 for (i = 0; i < v_budget; i++)
10568 pf->msix_entries[i].entry = i;
10569 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10571 if (v_actual < I40E_MIN_MSIX) {
10572 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10573 kfree(pf->msix_entries);
10574 pf->msix_entries = NULL;
10575 pci_disable_msix(pf->pdev);
10578 } else if (v_actual == I40E_MIN_MSIX) {
10579 /* Adjust for minimal MSIX use */
10580 pf->num_vmdq_vsis = 0;
10581 pf->num_vmdq_qps = 0;
10582 pf->num_lan_qps = 1;
10583 pf->num_lan_msix = 1;
10585 } else if (v_actual != v_budget) {
10586 /* If we have limited resources, we will start with no vectors
10587 * for the special features and then allocate vectors to some
10588 * of these features based on the policy and at the end disable
10589 * the features that did not get any vectors.
10593 dev_info(&pf->pdev->dev,
10594 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10595 v_actual, v_budget);
10596 /* reserve the misc vector */
10597 vec = v_actual - 1;
10599 /* Scale vector usage down */
10600 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
10601 pf->num_vmdq_vsis = 1;
10602 pf->num_vmdq_qps = 1;
10604 /* partition out the remaining vectors */
10607 pf->num_lan_msix = 1;
10610 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10611 pf->num_lan_msix = 1;
10612 pf->num_iwarp_msix = 1;
10614 pf->num_lan_msix = 2;
10618 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10619 pf->num_iwarp_msix = min_t(int, (vec / 3),
10621 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10622 I40E_DEFAULT_NUM_VMDQ_VSI);
10624 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10625 I40E_DEFAULT_NUM_VMDQ_VSI);
10627 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10628 pf->num_fdsb_msix = 1;
10631 pf->num_lan_msix = min_t(int,
10632 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10634 pf->num_lan_qps = pf->num_lan_msix;
10639 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10640 (pf->num_fdsb_msix == 0)) {
10641 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10642 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10643 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10645 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10646 (pf->num_vmdq_msix == 0)) {
10647 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10648 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10651 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10652 (pf->num_iwarp_msix == 0)) {
10653 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10654 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10656 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10657 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10659 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10661 pf->num_iwarp_msix);
10667 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
10668 * @vsi: the VSI being configured
10669 * @v_idx: index of the vector in the vsi struct
10670 * @cpu: cpu to be used on affinity_mask
10672 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10674 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10676 struct i40e_q_vector *q_vector;
10678 /* allocate q_vector */
10679 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10683 q_vector->vsi = vsi;
10684 q_vector->v_idx = v_idx;
10685 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10688 netif_napi_add(vsi->netdev, &q_vector->napi,
10689 i40e_napi_poll, NAPI_POLL_WEIGHT);
10691 /* tie q_vector and vsi together */
10692 vsi->q_vectors[v_idx] = q_vector;
10698 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
10699 * @vsi: the VSI being configured
10701 * We allocate one q_vector per queue interrupt. If allocation fails we
10704 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10706 struct i40e_pf *pf = vsi->back;
10707 int err, v_idx, num_q_vectors, current_cpu;
10709 /* if not MSIX, give the one vector only to the LAN VSI */
10710 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10711 num_q_vectors = vsi->num_q_vectors;
10712 else if (vsi == pf->vsi[pf->lan_vsi])
10717 current_cpu = cpumask_first(cpu_online_mask);
10719 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10720 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10723 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10724 if (unlikely(current_cpu >= nr_cpu_ids))
10725 current_cpu = cpumask_first(cpu_online_mask);
10732 i40e_free_q_vector(vsi, v_idx);
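/* Illustrative sketch (not part of the driver): the loop above walks the
* online CPUs round-robin when choosing the cpu argument for each
* q_vector, wrapping when cpumask_next() runs past nr_cpu_ids. With 4
* online CPUs and 6 q_vectors the sequence of cpu values would be
* 0, 1, 2, 3, 0, 1.
*/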
10738 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10739 * @pf: board private structure to initialize
10741 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10746 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10747 vectors = i40e_init_msix(pf);
10749 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10750 I40E_FLAG_IWARP_ENABLED |
10751 I40E_FLAG_RSS_ENABLED |
10752 I40E_FLAG_DCB_CAPABLE |
10753 I40E_FLAG_DCB_ENABLED |
10754 I40E_FLAG_SRIOV_ENABLED |
10755 I40E_FLAG_FD_SB_ENABLED |
10756 I40E_FLAG_FD_ATR_ENABLED |
10757 I40E_FLAG_VMDQ_ENABLED);
10758 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10760 /* rework the queue expectations without MSIX */
10761 i40e_determine_queue_usage(pf);
10765 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10766 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10767 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10768 vectors = pci_enable_msi(pf->pdev);
10770 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10772 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10774 vectors = 1; /* one MSI or Legacy vector */
10777 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10778 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10780 /* set up vector assignment tracking */
10781 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10782 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10786 pf->irq_pile->num_entries = vectors;
10788 /* track first vector for misc interrupts, ignore return */
10789 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
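/* Illustrative sketch (assumed vector count, not part of the driver): the
* tracking structure allocated above is a small header plus one u16 slot
* per vector, e.g. for 64 vectors
*
*	size = sizeof(struct i40e_lump_tracking) + 64 * sizeof(u16);
*
* and entry 0 is immediately reserved for the misc/AdminQ interrupt by the
* i40e_get_lump() call above.
*/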
10795 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10796 * @pf: private board data structure
10798 * Restore the interrupt scheme that was cleared when we suspended the
10799 * device. This should be called during resume to re-allocate the q_vectors
10800 * and reacquire IRQs.
10802 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10806 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10807 * scheme. We need to re-enable them here in order to attempt to
10808 * re-acquire the MSI or MSI-X vectors
10810 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10812 err = i40e_init_interrupt_scheme(pf);
10816 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10817 * rings together again.
10819 for (i = 0; i < pf->num_alloc_vsi; i++) {
10821 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10824 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10828 err = i40e_setup_misc_vector(pf);
10832 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
10833 i40e_client_update_msix_info(pf);
10840 i40e_vsi_free_q_vectors(pf->vsi[i]);
10847 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10848 * @pf: board private structure
10850 * This sets up the handler for MSIX 0, which is used to manage the
10851 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10852 * when in MSI or Legacy interrupt mode.
10854 static int i40e_setup_misc_vector(struct i40e_pf *pf)
10856 struct i40e_hw *hw = &pf->hw;
10859 /* Only request the IRQ once, the first time through. */
10860 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10861 err = request_irq(pf->msix_entries[0].vector,
10862 i40e_intr, 0, pf->int_name, pf);
10864 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10865 dev_info(&pf->pdev->dev,
10866 "request_irq for %s failed: %d\n",
10867 pf->int_name, err);
10872 i40e_enable_misc_int_causes(pf);
10874 /* associate no queues to the misc vector */
10875 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10876 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
10880 i40e_irq_dynamic_enable_icr0(pf);
10886 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10887 * @vsi: Pointer to vsi structure
10888 * @seed: Buffer to store the hash keys
10889 * @lut: Buffer to store the lookup table entries
10890 * @lut_size: Size of buffer to store the lookup table entries
10892 * Return 0 on success, negative on failure
10894 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10895 u8 *lut, u16 lut_size)
10897 struct i40e_pf *pf = vsi->back;
10898 struct i40e_hw *hw = &pf->hw;
10902 ret = i40e_aq_get_rss_key(hw, vsi->id,
10903 (struct i40e_aqc_get_set_rss_key_data *)seed);
10905 dev_info(&pf->pdev->dev,
10906 "Cannot get RSS key, err %s aq_err %s\n",
10907 i40e_stat_str(&pf->hw, ret),
10908 i40e_aq_str(&pf->hw,
10909 pf->hw.aq.asq_last_status));
10915 bool pf_lut = vsi->type == I40E_VSI_MAIN;
10917 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10919 dev_info(&pf->pdev->dev,
10920 "Cannot get RSS lut, err %s aq_err %s\n",
10921 i40e_stat_str(&pf->hw, ret),
10922 i40e_aq_str(&pf->hw,
10923 pf->hw.aq.asq_last_status));
10932 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
10933 * @vsi: Pointer to vsi structure
10934 * @seed: RSS hash seed
10935 * @lut: Lookup table
10936 * @lut_size: Lookup table size
10938 * Returns 0 on success, negative on failure
10940 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10941 const u8 *lut, u16 lut_size)
10943 struct i40e_pf *pf = vsi->back;
10944 struct i40e_hw *hw = &pf->hw;
10945 u16 vf_id = vsi->vf_id;
10948 /* Fill out hash function seed */
10950 u32 *seed_dw = (u32 *)seed;
10952 if (vsi->type == I40E_VSI_MAIN) {
10953 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10954 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10955 } else if (vsi->type == I40E_VSI_SRIOV) {
10956 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10957 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10959 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10964 u32 *lut_dw = (u32 *)lut;
10966 if (vsi->type == I40E_VSI_MAIN) {
10967 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10969 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10970 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10971 } else if (vsi->type == I40E_VSI_SRIOV) {
10972 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10974 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10975 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10977 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10986 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10987 * @vsi: Pointer to VSI structure
10988 * @seed: Buffer to store the keys
10989 * @lut: Buffer to store the lookup table entries
10990 * @lut_size: Size of buffer to store the lookup table entries
10992 * Returns 0 on success, negative on failure
10994 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10995 u8 *lut, u16 lut_size)
10997 struct i40e_pf *pf = vsi->back;
10998 struct i40e_hw *hw = &pf->hw;
11002 u32 *seed_dw = (u32 *)seed;
11004 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
11005 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
11008 u32 *lut_dw = (u32 *)lut;
11010 if (lut_size != I40E_HLUT_ARRAY_SIZE)
11012 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11013 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
11020 * i40e_config_rss - Configure RSS keys and lut
11021 * @vsi: Pointer to VSI structure
11022 * @seed: RSS hash seed
11023 * @lut: Lookup table
11024 * @lut_size: Lookup table size
11026 * Returns 0 on success, negative on failure
11028 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11030 struct i40e_pf *pf = vsi->back;
11032 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11033 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
11035 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
11039 * i40e_get_rss - Get RSS keys and lut
11040 * @vsi: Pointer to VSI structure
11041 * @seed: Buffer to store the keys
11042 * @lut: Buffer to store the lookup table entries
11043 * @lut_size: Size of buffer to store the lookup table entries
11045 * Returns 0 on success, negative on failure
11047 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
11049 struct i40e_pf *pf = vsi->back;
11051 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
11052 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
11054 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
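/* Illustrative usage sketch (not part of the driver): a caller such as the
* ethtool RSS path is expected to size its buffers from the VSI before
* using the wrappers above, roughly
*
*	u8 seed[I40E_HKEY_ARRAY_SIZE];
*	u8 *lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
*
*	if (lut && !i40e_get_rss(vsi, seed, lut, vsi->rss_table_size))
*		... inspect or modify seed/lut, then i40e_config_rss() ...
*	kfree(lut);
*
* Whether the AQ or register path is taken is decided internally from the
* I40E_HW_RSS_AQ_CAPABLE feature, as shown above.
*/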
11058 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
11059 * @pf: Pointer to board private structure
11060 * @lut: Lookup table
11061 * @rss_table_size: Lookup table size
11062 * @rss_size: Number of queues over which to distribute the hash
11064 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
11065 u16 rss_table_size, u16 rss_size)
11069 for (i = 0; i < rss_table_size; i++)
11070 lut[i] = i % rss_size;
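/* Illustrative example (assumed sizes, not part of the driver): with
* rss_table_size = 8 and rss_size = 3 the default table produced above is
*
*	lut[] = { 0, 1, 2, 0, 1, 2, 0, 1 }
*
* i.e. hash buckets are spread evenly across the first rss_size queues.
*/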
11074 * i40e_pf_config_rss - Prepare for RSS if used
11075 * @pf: board private structure
11077 static int i40e_pf_config_rss(struct i40e_pf *pf)
11079 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11080 u8 seed[I40E_HKEY_ARRAY_SIZE];
11082 struct i40e_hw *hw = &pf->hw;
11087 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
11088 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
11089 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
11090 hena |= i40e_pf_get_default_rss_hena(pf);
11092 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
11093 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
11095 /* Determine the RSS table size based on the hardware capabilities */
11096 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
11097 reg_val = (pf->rss_table_size == 512) ?
11098 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
11099 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11100 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
11102 /* Determine the RSS size of the VSI */
11103 if (!vsi->rss_size) {
11105 /* If the firmware does something weird during VSI init, we
11106 * could end up with zero TCs. Check for that to avoid
11107 * divide-by-zero. It probably won't pass traffic, but it also won't crash. */
11110 qcount = vsi->num_queue_pairs /
11111 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11112 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11114 if (!vsi->rss_size)
11117 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11121 /* Use user configured lut if there is one, otherwise use default */
11122 if (vsi->rss_lut_user)
11123 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11125 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11127 /* Use user configured hash key if there is one, otherwise use default */
11130 if (vsi->rss_hkey_user)
11131 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11133 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11134 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
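/* Illustrative arithmetic (assumed numbers, not part of the driver): with
* num_queue_pairs = 16 and numtc = 4 the computation above gives
* qcount = 16 / 4 = 4, so vsi->rss_size = min(pf->alloc_rss_size, 4) and
* the default LUT built by i40e_fill_rss_lut() only references queues 0..3.
*/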
11141 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11142 * @pf: board private structure
11143 * @queue_count: the requested queue count for rss.
11145 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
11146 * count, which may differ from the requested queue count.
11147 * Note: expects to be called while under rtnl_lock()
11149 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11151 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11154 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11157 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11159 if (queue_count != vsi->num_queue_pairs) {
11162 vsi->req_queue_pairs = queue_count;
11163 i40e_prep_for_reset(pf, true);
11165 pf->alloc_rss_size = new_rss_size;
11167 i40e_reset_and_rebuild(pf, true, true);
11169 /* Discard the user configured hash keys and lut, if fewer
11170 * queues are enabled.
11172 if (queue_count < vsi->rss_size) {
11173 i40e_clear_rss_config_user(vsi);
11174 dev_dbg(&pf->pdev->dev,
11175 "discard user configured hash keys and lut\n");
11178 /* Reset vsi->rss_size, as number of enabled queues changed */
11179 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11180 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11182 i40e_pf_config_rss(pf);
11184 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11185 vsi->req_queue_pairs, pf->rss_size_max);
11186 return pf->alloc_rss_size;
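/* Illustrative usage note (not part of the driver): this is the path taken
* when the administrator changes the channel count, e.g. via
*
*	ethtool -L <ifname> combined 8
*
* The request is clamped to pf->rss_size_max above, the PF is reset and
* rebuilt while rtnl_lock is held by the caller, and the queue count
* actually programmed (pf->alloc_rss_size) is returned.
*/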
11190 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11191 * @pf: board private structure
11193 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11195 i40e_status status;
11196 bool min_valid, max_valid;
11197 u32 max_bw, min_bw;
11199 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11200 &min_valid, &max_valid);
11204 pf->min_bw = min_bw;
11206 pf->max_bw = max_bw;
11213 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11214 * @pf: board private structure
11216 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11218 struct i40e_aqc_configure_partition_bw_data bw_data;
11219 i40e_status status;
11221 memset(&bw_data, 0, sizeof(bw_data));
11223 /* Set the valid bit for this PF */
11224 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11225 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11226 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11228 /* Set the new bandwidths */
11229 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
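/* Illustrative sketch (assumed pf_id, not part of the driver): the AQ
* buffer built above carries one entry per PF, and only the slot selected
* by pf_valid_bits is acted on. For an assumed pf_id of 2:
*
*	bw_data.pf_valid_bits = cpu_to_le16(BIT(2));
*	bw_data.max_bw[2] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
*
* so the bandwidth settings of the other partitions are left untouched.
*/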
11235 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11236 * @pf: board private structure
11238 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11240 /* Commit temporary BW setting to permanent NVM image */
11241 enum i40e_admin_queue_err last_aq_status;
11245 if (pf->hw.partition_id != 1) {
11246 dev_info(&pf->pdev->dev,
11247 "Commit BW only works on partition 1! This is partition %d",
11248 pf->hw.partition_id);
11249 ret = I40E_NOT_SUPPORTED;
11250 goto bw_commit_out;
11253 /* Acquire NVM for read access */
11254 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11255 last_aq_status = pf->hw.aq.asq_last_status;
11257 dev_info(&pf->pdev->dev,
11258 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11259 i40e_stat_str(&pf->hw, ret),
11260 i40e_aq_str(&pf->hw, last_aq_status));
11261 goto bw_commit_out;
11264 /* Read word 0x10 of NVM - SW compatibility word 1 */
11265 ret = i40e_aq_read_nvm(&pf->hw,
11266 I40E_SR_NVM_CONTROL_WORD,
11267 0x10, sizeof(nvm_word), &nvm_word,
11269 /* Save off last admin queue command status before releasing the NVM resource */
11272 last_aq_status = pf->hw.aq.asq_last_status;
11273 i40e_release_nvm(&pf->hw);
11275 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11276 i40e_stat_str(&pf->hw, ret),
11277 i40e_aq_str(&pf->hw, last_aq_status));
11278 goto bw_commit_out;
11281 /* Wait a bit for NVM release to complete */
11284 /* Acquire NVM for write access */
11285 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11286 last_aq_status = pf->hw.aq.asq_last_status;
11288 dev_info(&pf->pdev->dev,
11289 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11290 i40e_stat_str(&pf->hw, ret),
11291 i40e_aq_str(&pf->hw, last_aq_status));
11292 goto bw_commit_out;
11294 /* Write it back out unchanged to initiate update NVM,
11295 * which will force a write of the shadow (alt) RAM to
11296 * the NVM - thus storing the bandwidth values permanently.
11298 ret = i40e_aq_update_nvm(&pf->hw,
11299 I40E_SR_NVM_CONTROL_WORD,
11300 0x10, sizeof(nvm_word),
11301 &nvm_word, true, 0, NULL);
11302 /* Save off last admin queue command status before releasing the NVM resource */
11305 last_aq_status = pf->hw.aq.asq_last_status;
11306 i40e_release_nvm(&pf->hw);
11308 dev_info(&pf->pdev->dev,
11309 "BW settings NOT SAVED, err %s aq_err %s\n",
11310 i40e_stat_str(&pf->hw, ret),
11311 i40e_aq_str(&pf->hw, last_aq_status));
11318 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11319 * @pf: board private structure to initialize
11321 * i40e_sw_init initializes the Adapter private data structure.
11322 * Fields are initialized based on PCI device information and
11323 * OS network device settings (MTU size).
11325 static int i40e_sw_init(struct i40e_pf *pf)
11331 /* Set default capability flags */
11332 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11333 I40E_FLAG_MSI_ENABLED |
11334 I40E_FLAG_MSIX_ENABLED;
11336 /* Set default ITR */
11337 pf->rx_itr_default = I40E_ITR_RX_DEF;
11338 pf->tx_itr_default = I40E_ITR_TX_DEF;
11340 /* Depending on PF configurations, it is possible that the RSS
11341 * maximum might end up larger than the available queues
11343 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11344 pf->alloc_rss_size = 1;
11345 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11346 pf->rss_size_max = min_t(int, pf->rss_size_max,
11347 pf->hw.func_caps.num_tx_qp);
11349 /* find the next higher power-of-2 of num cpus */
11350 pow = roundup_pow_of_two(num_online_cpus());
11351 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
11353 if (pf->hw.func_caps.rss) {
11354 pf->flags |= I40E_FLAG_RSS_ENABLED;
11355 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11356 num_online_cpus());
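/* Illustrative arithmetic (assumed capabilities, not part of the driver):
* with rss_table_entry_width = 9 the ceiling above starts at 2^9 = 512,
* is clamped by num_tx_qp and then by roundup_pow_of_two(num online CPUs);
* e.g. on a 12-CPU system that last clamp is 16 and alloc_rss_size ends up
* min(rss_size_max, 12) = 12.
*/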
11359 /* MFP mode enabled */
11360 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11361 pf->flags |= I40E_FLAG_MFP_ENABLED;
11362 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11363 if (i40e_get_partition_bw_setting(pf)) {
11364 dev_warn(&pf->pdev->dev,
11365 "Could not get partition bw settings\n");
11367 dev_info(&pf->pdev->dev,
11368 "Partition BW Min = %8.8x, Max = %8.8x\n",
11369 pf->min_bw, pf->max_bw);
11371 /* nudge the Tx scheduler */
11372 i40e_set_partition_bw_setting(pf);
11376 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11377 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11378 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11379 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11380 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11381 pf->hw.num_partitions > 1)
11382 dev_info(&pf->pdev->dev,
11383 "Flow Director Sideband mode Disabled in MFP mode\n");
11385 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11386 pf->fdir_pf_filter_count =
11387 pf->hw.func_caps.fd_filters_guaranteed;
11388 pf->hw.fdir_shared_filter_count =
11389 pf->hw.func_caps.fd_filters_best_effort;
11392 if (pf->hw.mac.type == I40E_MAC_X722) {
11393 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11394 I40E_HW_128_QP_RSS_CAPABLE |
11395 I40E_HW_ATR_EVICT_CAPABLE |
11396 I40E_HW_WB_ON_ITR_CAPABLE |
11397 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11398 I40E_HW_NO_PCI_LINK_CHECK |
11399 I40E_HW_USE_SET_LLDP_MIB |
11400 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11401 I40E_HW_PTP_L4_CAPABLE |
11402 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11403 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11405 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11406 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11407 I40E_FDEVICT_PCTYPE_DEFAULT) {
11408 dev_warn(&pf->pdev->dev,
11409 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11410 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11412 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11413 ((pf->hw.aq.api_maj_ver == 1) &&
11414 (pf->hw.aq.api_min_ver > 4))) {
11415 /* Supported in FW API version higher than 1.4 */
11416 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11419 /* Enable HW ATR eviction if possible */
11420 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11421 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11423 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11424 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11425 (pf->hw.aq.fw_maj_ver < 4))) {
11426 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11427 /* No DCB support for FW < v4.33 */
11428 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11431 /* Disable FW LLDP if FW < v4.3 */
11432 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11433 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11434 (pf->hw.aq.fw_maj_ver < 4)))
11435 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11437 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11438 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11439 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11440 (pf->hw.aq.fw_maj_ver >= 5)))
11441 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11443 /* Enable PTP L4 if FW > v6.0 */
11444 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11445 pf->hw.aq.fw_maj_ver >= 6)
11446 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11448 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11449 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11450 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11451 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11454 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11455 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11456 /* IWARP needs one extra vector for CQP just like MISC. */
11457 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11459 /* Stopping the FW LLDP engine is only supported on the
11460 * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
11461 * engine is not supported if NPAR is functioning on this port. */
11464 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11465 !pf->hw.func_caps.npar_enable &&
11466 (pf->hw.aq.api_maj_ver > 1 ||
11467 (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
11468 pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
11470 #ifdef CONFIG_PCI_IOV
11471 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11472 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11473 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11474 pf->num_req_vfs = min_t(int,
11475 pf->hw.func_caps.num_vfs,
11476 I40E_MAX_VF_COUNT);
11478 #endif /* CONFIG_PCI_IOV */
11479 pf->eeprom_version = 0xDEAD;
11480 pf->lan_veb = I40E_NO_VEB;
11481 pf->lan_vsi = I40E_NO_VSI;
11483 /* By default FW has this off for performance reasons */
11484 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11486 /* set up queue assignment tracking */
11487 size = sizeof(struct i40e_lump_tracking)
11488 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11489 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11490 if (!pf->qp_pile) {
11494 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11496 pf->tx_timeout_recovery_level = 1;
11498 mutex_init(&pf->switch_mutex);
11505 * i40e_set_ntuple - set the ntuple feature flag and take action
11506 * @pf: board private structure to initialize
11507 * @features: the feature set that the stack is suggesting
11509 * returns a bool to indicate if reset needs to happen
11511 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11513 bool need_reset = false;
11515 /* Check if Flow Director n-tuple support was enabled or disabled. If
11516 * the state changed, we need to reset.
11518 if (features & NETIF_F_NTUPLE) {
11519 /* Enable filters and mark for reset */
11520 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11522 /* enable FD_SB only if there is an MSI-X vector and no cloud filters exist */
11525 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11526 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11527 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11530 /* turn off filters, mark for reset and clear SW filter list */
11531 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11533 i40e_fdir_filter_exit(pf);
11535 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11536 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
11537 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11539 /* reset fd counters */
11540 pf->fd_add_err = 0;
11541 pf->fd_atr_cnt = 0;
11542 /* if ATR was auto disabled it can be re-enabled. */
11543 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
11544 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11545 (I40E_DEBUG_FD & pf->hw.debug_mask))
11546 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
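/* Illustrative usage note (not part of the driver): this helper backs the
* ntuple toggle handled by i40e_set_features() below, i.e. roughly what
* happens on
*
*	ethtool -K <ifname> ntuple on|off
*
* The return value only reports whether a PF reset is required; the caller
* is responsible for actually issuing the reset.
*/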
11552 * i40e_clear_rss_lut - clear the rx hash lookup table
11553 * @vsi: the VSI being configured
11555 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11557 struct i40e_pf *pf = vsi->back;
11558 struct i40e_hw *hw = &pf->hw;
11559 u16 vf_id = vsi->vf_id;
11562 if (vsi->type == I40E_VSI_MAIN) {
11563 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11564 wr32(hw, I40E_PFQF_HLUT(i), 0);
11565 } else if (vsi->type == I40E_VSI_SRIOV) {
11566 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11567 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11569 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11574 * i40e_set_features - set the netdev feature flags
11575 * @netdev: ptr to the netdev being adjusted
11576 * @features: the feature set that the stack is suggesting
11577 * Note: expects to be called while under rtnl_lock()
11579 static int i40e_set_features(struct net_device *netdev,
11580 netdev_features_t features)
11582 struct i40e_netdev_priv *np = netdev_priv(netdev);
11583 struct i40e_vsi *vsi = np->vsi;
11584 struct i40e_pf *pf = vsi->back;
11587 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11588 i40e_pf_config_rss(pf);
11589 else if (!(features & NETIF_F_RXHASH) &&
11590 netdev->features & NETIF_F_RXHASH)
11591 i40e_clear_rss_lut(vsi);
11593 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11594 i40e_vlan_stripping_enable(vsi);
11596 i40e_vlan_stripping_disable(vsi);
11598 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11599 dev_err(&pf->pdev->dev,
11600 "Offloaded tc filters active, can't turn hw_tc_offload off");
11604 need_reset = i40e_set_ntuple(pf, features);
11607 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11613 * i40e_get_udp_port_idx - Look up a UDP port that may be offloaded for Rx
11614 * @pf: board private structure
11615 * @port: The UDP port to look up
11617 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11619 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11623 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11624 /* Do not report ports with pending deletions as being available */
11627 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11629 if (pf->udp_ports[i].port == port)
11637 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11638 * @netdev: This physical port's netdev
11639 * @ti: Tunnel endpoint information
11641 static void i40e_udp_tunnel_add(struct net_device *netdev,
11642 struct udp_tunnel_info *ti)
11644 struct i40e_netdev_priv *np = netdev_priv(netdev);
11645 struct i40e_vsi *vsi = np->vsi;
11646 struct i40e_pf *pf = vsi->back;
11647 u16 port = ntohs(ti->port);
11651 idx = i40e_get_udp_port_idx(pf, port);
11653 /* Check if port already exists */
11654 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11655 netdev_info(netdev, "port %d already offloaded\n", port);
11659 /* Now check if there is space to add the new port */
11660 next_idx = i40e_get_udp_port_idx(pf, 0);
11662 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11663 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11668 switch (ti->type) {
11669 case UDP_TUNNEL_TYPE_VXLAN:
11670 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11672 case UDP_TUNNEL_TYPE_GENEVE:
11673 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11675 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11681 /* New port: add it and mark its index in the bitmap */
11682 pf->udp_ports[next_idx].port = port;
11683 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11684 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11685 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11689 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11690 * @netdev: This physical port's netdev
11691 * @ti: Tunnel endpoint information
11693 static void i40e_udp_tunnel_del(struct net_device *netdev,
11694 struct udp_tunnel_info *ti)
11696 struct i40e_netdev_priv *np = netdev_priv(netdev);
11697 struct i40e_vsi *vsi = np->vsi;
11698 struct i40e_pf *pf = vsi->back;
11699 u16 port = ntohs(ti->port);
11702 idx = i40e_get_udp_port_idx(pf, port);
11704 /* Check if port already exists */
11705 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11708 switch (ti->type) {
11709 case UDP_TUNNEL_TYPE_VXLAN:
11710 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11713 case UDP_TUNNEL_TYPE_GENEVE:
11714 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11721 /* if port exists, set it to 0 (mark for deletion)
11722 * and make it pending
11724 pf->udp_ports[idx].port = 0;
11726 /* Toggle pending bit instead of setting it. This way if we are
11727 * deleting a port that has yet to be added we just clear the pending
11728 * bit and don't have to worry about it.
11730 pf->pending_udp_bitmap ^= BIT_ULL(idx);
11731 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
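/* Illustrative example (not part of the driver): because the add path sets
* the bit with |= while the delete path toggles it with ^=, an add that is
* deleted again before the sync task runs cancels out, e.g. for idx = 3:
*
*	add:    pending_udp_bitmap |= BIT_ULL(3)   bit set
*	delete: pending_udp_bitmap ^= BIT_ULL(3)   bit cleared again
*
* so no stale filter work is queued for a port that never reached hardware.
*/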
11735 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11739 static int i40e_get_phys_port_id(struct net_device *netdev,
11740 struct netdev_phys_item_id *ppid)
11742 struct i40e_netdev_priv *np = netdev_priv(netdev);
11743 struct i40e_pf *pf = np->vsi->back;
11744 struct i40e_hw *hw = &pf->hw;
11746 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11747 return -EOPNOTSUPP;
11749 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11750 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11756 * i40e_ndo_fdb_add - add an entry to the hardware database
11757 * @ndm: the input from the stack
11758 * @tb: pointer to array of nladdr (unused)
11759 * @dev: the net device pointer
11760 * @addr: the MAC address entry being added
11762 * @flags: instructions from stack about fdb operation
11764 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11765 struct net_device *dev,
11766 const unsigned char *addr, u16 vid,
11769 struct i40e_netdev_priv *np = netdev_priv(dev);
11770 struct i40e_pf *pf = np->vsi->back;
11773 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11774 return -EOPNOTSUPP;
11777 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11781 /* Hardware does not support aging addresses, so if an
11782 * ndm_state is given only allow permanent addresses */
11784 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11785 netdev_info(dev, "FDB only supports static addresses\n");
11789 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11790 err = dev_uc_add_excl(dev, addr);
11791 else if (is_multicast_ether_addr(addr))
11792 err = dev_mc_add_excl(dev, addr);
11796 /* Only return duplicate errors if NLM_F_EXCL is set */
11797 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11804 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11805 * @dev: the netdev being configured
11806 * @nlh: RTNL message
11807 * @flags: bridge flags
11809 * Inserts a new hardware bridge if not already created and
11810 * enables the bridging mode requested (VEB or VEPA). If the
11811 * hardware bridge has already been inserted and the request
11812 * is to change the mode then that requires a PF reset to
11813 * allow rebuild of the components with required hardware
11814 * bridge mode enabled.
11816 * Note: expects to be called while under rtnl_lock()
11818 static int i40e_ndo_bridge_setlink(struct net_device *dev,
11819 struct nlmsghdr *nlh,
11822 struct i40e_netdev_priv *np = netdev_priv(dev);
11823 struct i40e_vsi *vsi = np->vsi;
11824 struct i40e_pf *pf = vsi->back;
11825 struct i40e_veb *veb = NULL;
11826 struct nlattr *attr, *br_spec;
11829 /* Only for PF VSI for now */
11830 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11831 return -EOPNOTSUPP;
11833 /* Find the HW bridge for PF VSI */
11834 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11835 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11839 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11843 nla_for_each_nested(attr, br_spec, rem) {
11846 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11849 mode = nla_get_u16(attr);
11850 if ((mode != BRIDGE_MODE_VEPA) &&
11851 (mode != BRIDGE_MODE_VEB))
11854 /* Insert a new HW bridge */
11856 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11857 vsi->tc_config.enabled_tc);
11859 veb->bridge_mode = mode;
11860 i40e_config_bridge_mode(veb);
11862 /* No Bridge HW offload available */
11866 } else if (mode != veb->bridge_mode) {
11867 /* Existing HW bridge but different mode needs reset */
11868 veb->bridge_mode = mode;
11869 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11870 if (mode == BRIDGE_MODE_VEB)
11871 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11873 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11874 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
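/* Illustrative usage note (not part of the driver): the bridge mode is
* normally driven from user space with the iproute2 bridge tool, e.g.
*
*	bridge link set dev <ifname> hwmode veb
*
* Switching between VEB and VEPA on an existing bridge takes the reset
* path above so the switch components are rebuilt in the new mode.
*/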
11883 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11886 * @seq: RTNL message seq #
11887 * @dev: the netdev being configured
11888 * @filter_mask: unused
11889 * @nlflags: netlink flags passed in
11891 * Return the mode in which the hardware bridge is operating: VEB or VEPA.
11894 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11895 struct net_device *dev,
11896 u32 __always_unused filter_mask,
11899 struct i40e_netdev_priv *np = netdev_priv(dev);
11900 struct i40e_vsi *vsi = np->vsi;
11901 struct i40e_pf *pf = vsi->back;
11902 struct i40e_veb *veb = NULL;
11905 /* Only for PF VSI for now */
11906 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11907 return -EOPNOTSUPP;
11909 /* Find the HW bridge for the PF VSI */
11910 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11911 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11918 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11919 0, 0, nlflags, filter_mask, NULL);
11923 * i40e_features_check - Validate encapsulated packet conforms to limits
11925 * @dev: This physical port's netdev
11926 * @features: Offload features that the stack believes apply
11928 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11929 struct net_device *dev,
11930 netdev_features_t features)
11934 /* No point in doing any of this if neither checksum nor GSO are
11935 * being requested for this frame. We can rule out both by just
11936 * checking for CHECKSUM_PARTIAL
11938 if (skb->ip_summed != CHECKSUM_PARTIAL)
11941 /* We cannot support GSO if the MSS is going to be less than
11942 * 64 bytes. If it is then we need to drop support for GSO.
11944 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11945 features &= ~NETIF_F_GSO_MASK;
11947 /* MACLEN can support at most 63 words */
11948 len = skb_network_header(skb) - skb->data;
11949 if (len & ~(63 * 2))
11952 /* IPLEN and EIPLEN can support at most 127 dwords */
11953 len = skb_transport_header(skb) - skb_network_header(skb);
11954 if (len & ~(127 * 4))
11957 if (skb->encapsulation) {
11958 /* L4TUNLEN can support 127 words */
11959 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11960 if (len & ~(127 * 2))
11963 /* IPLEN can support at most 127 dwords */
11964 len = skb_inner_transport_header(skb) -
11965 skb_inner_network_header(skb);
11966 if (len & ~(127 * 4))
11970 /* No need to validate L4LEN as TCP is the only protocol with a
11971 * flexible value and we support all possible values supported
11972 * by TCP, which is at most 15 dwords
11977 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
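/* Illustrative arithmetic (not part of the driver): the limits checked
* above translate to byte lengths as follows: MACLEN of 63 words allows an
* L2 header of up to 126 bytes, and the 127-dword IPLEN/EIPLEN checks allow
* IP headers of up to 508 bytes. Frames with longer headers fall back to
* software checksum/GSO via the final feature mask.
*/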
11981 * i40e_xdp_setup - add/remove an XDP program
11982 * @vsi: VSI to be changed
11983 * @prog: XDP program
11985 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11986 struct bpf_prog *prog)
11988 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11989 struct i40e_pf *pf = vsi->back;
11990 struct bpf_prog *old_prog;
11994 /* Don't allow frames that span over multiple buffers */
11995 if (frame_size > vsi->rx_buf_len)
11998 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
12001 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
12002 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
12005 i40e_prep_for_reset(pf, true);
12007 old_prog = xchg(&vsi->xdp_prog, prog);
12010 i40e_reset_and_rebuild(pf, true, true);
12012 for (i = 0; i < vsi->num_queue_pairs; i++)
12013 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
12016 bpf_prog_put(old_prog);
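/* Illustrative arithmetic (assumed MTU, not part of the driver): with the
* default 1500-byte MTU the frame_size computed above is
* 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes,
* which must fit in a single Rx buffer (vsi->rx_buf_len) for the XDP
* program to be accepted, since multi-buffer frames are rejected here.
*/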
12022 * i40e_xdp - implements ndo_bpf for i40e
12024 * @xdp: XDP command
12026 static int i40e_xdp(struct net_device *dev,
12027 struct netdev_bpf *xdp)
12029 struct i40e_netdev_priv *np = netdev_priv(dev);
12030 struct i40e_vsi *vsi = np->vsi;
12032 if (vsi->type != I40E_VSI_MAIN)
12035 switch (xdp->command) {
12036 case XDP_SETUP_PROG:
12037 return i40e_xdp_setup(vsi, xdp->prog);
12038 case XDP_QUERY_PROG:
12039 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12046 static const struct net_device_ops i40e_netdev_ops = {
12047 .ndo_open = i40e_open,
12048 .ndo_stop = i40e_close,
12049 .ndo_start_xmit = i40e_lan_xmit_frame,
12050 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12051 .ndo_set_rx_mode = i40e_set_rx_mode,
12052 .ndo_validate_addr = eth_validate_addr,
12053 .ndo_set_mac_address = i40e_set_mac,
12054 .ndo_change_mtu = i40e_change_mtu,
12055 .ndo_do_ioctl = i40e_ioctl,
12056 .ndo_tx_timeout = i40e_tx_timeout,
12057 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12058 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12059 #ifdef CONFIG_NET_POLL_CONTROLLER
12060 .ndo_poll_controller = i40e_netpoll,
12062 .ndo_setup_tc = __i40e_setup_tc,
12063 .ndo_set_features = i40e_set_features,
12064 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12065 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12066 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12067 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12068 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12069 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12070 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12071 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12072 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12073 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12074 .ndo_fdb_add = i40e_ndo_fdb_add,
12075 .ndo_features_check = i40e_features_check,
12076 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12077 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12078 .ndo_bpf = i40e_xdp,
12079 .ndo_xdp_xmit = i40e_xdp_xmit,
12083 * i40e_config_netdev - Setup the netdev flags
12084 * @vsi: the VSI being configured
12086 * Returns 0 on success, negative value on failure
12088 static int i40e_config_netdev(struct i40e_vsi *vsi)
12090 struct i40e_pf *pf = vsi->back;
12091 struct i40e_hw *hw = &pf->hw;
12092 struct i40e_netdev_priv *np;
12093 struct net_device *netdev;
12094 u8 broadcast[ETH_ALEN];
12095 u8 mac_addr[ETH_ALEN];
12097 netdev_features_t hw_enc_features;
12098 netdev_features_t hw_features;
12100 etherdev_size = sizeof(struct i40e_netdev_priv);
12101 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12105 vsi->netdev = netdev;
12106 np = netdev_priv(netdev);
12109 hw_enc_features = NETIF_F_SG |
12111 NETIF_F_IPV6_CSUM |
12113 NETIF_F_SOFT_FEATURES |
12118 NETIF_F_GSO_GRE_CSUM |
12119 NETIF_F_GSO_PARTIAL |
12120 NETIF_F_GSO_IPXIP4 |
12121 NETIF_F_GSO_IPXIP6 |
12122 NETIF_F_GSO_UDP_TUNNEL |
12123 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12129 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12130 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12132 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12134 netdev->hw_enc_features |= hw_enc_features;
12136 /* record features VLANs can make use of */
12137 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12139 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12140 netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12142 hw_features = hw_enc_features |
12143 NETIF_F_HW_VLAN_CTAG_TX |
12144 NETIF_F_HW_VLAN_CTAG_RX;
12146 netdev->hw_features |= hw_features;
12148 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12149 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12151 if (vsi->type == I40E_VSI_MAIN) {
12152 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12153 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12154 /* The following steps are necessary for two reasons. First,
12155 * some older NVM configurations load a default MAC-VLAN
12156 * filter that will accept any tagged packet, and we want to
12157 * replace this with a normal filter. Additionally, it is
12158 * possible our MAC address was provided by the platform using
12159 * Open Firmware or similar.
12161 * Thus, we need to remove the default filter and install one
12162 * specific to the MAC address.
12164 i40e_rm_default_mac_filter(vsi, mac_addr);
12165 spin_lock_bh(&vsi->mac_filter_hash_lock);
12166 i40e_add_mac_filter(vsi, mac_addr);
12167 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12169 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
12170 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
12171 * the end, which is 4 bytes long, so force truncation of the
12172 * original name by IFNAMSIZ - 4
12174 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12176 pf->vsi[pf->lan_vsi]->netdev->name);
12177 eth_random_addr(mac_addr);
12179 spin_lock_bh(&vsi->mac_filter_hash_lock);
12180 i40e_add_mac_filter(vsi, mac_addr);
12181 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12184 /* Add the broadcast filter so that we initially will receive
12185 * broadcast packets. Note that when a new VLAN is first added the
12186 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
12187 * specific filters as part of transitioning into "vlan" operation.
12188 * When more VLANs are added, the driver will copy each existing MAC
12189 * filter and add it for the new VLAN.
12191 * Broadcast filters are handled specially by
12192 * i40e_sync_filters_subtask, as the driver must set the broadcast
12193 * promiscuous bit instead of adding this directly as a MAC/VLAN
12194 * filter. The subtask will update the correct broadcast promiscuous
12195 * bits as VLANs become active or inactive.
12197 eth_broadcast_addr(broadcast);
12198 spin_lock_bh(&vsi->mac_filter_hash_lock);
12199 i40e_add_mac_filter(vsi, broadcast);
12200 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12202 ether_addr_copy(netdev->dev_addr, mac_addr);
12203 ether_addr_copy(netdev->perm_addr, mac_addr);
12205 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
12206 netdev->neigh_priv_len = sizeof(u32) * 4;
12208 netdev->priv_flags |= IFF_UNICAST_FLT;
12209 netdev->priv_flags |= IFF_SUPP_NOFCS;
12210 /* Setup netdev TC information */
12211 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12213 netdev->netdev_ops = &i40e_netdev_ops;
12214 netdev->watchdog_timeo = 5 * HZ;
12215 i40e_set_ethtool_ops(netdev);
12217 /* MTU range: 68 - 9706 */
12218 netdev->min_mtu = ETH_MIN_MTU;
12219 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
12225 * i40e_vsi_delete - Delete a VSI from the switch
12226 * @vsi: the VSI being removed
12228 * This function has no return value; the admin queue status is not checked.
12230 static void i40e_vsi_delete(struct i40e_vsi *vsi)
12232 /* removing the default VSI is not allowed */
12233 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12236 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
12240 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
12241 * @vsi: the VSI being queried
12243 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
12245 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12247 struct i40e_veb *veb;
12248 struct i40e_pf *pf = vsi->back;
12250 /* Uplink is not a bridge so default to VEB */
12251 if (vsi->veb_idx == I40E_NO_VEB)
12254 veb = pf->veb[vsi->veb_idx];
12256 dev_info(&pf->pdev->dev,
12257 "There is no veb associated with the bridge\n");
12261 /* Uplink is a bridge in VEPA mode */
12262 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
12265 /* Uplink is a bridge in VEB mode */
12269 /* VEPA is now default bridge, so return 0 */
12274 * i40e_add_vsi - Add a VSI to the switch
12275 * @vsi: the VSI being configured
12277 * This initializes a VSI context depending on the VSI type to be added and
12278 * passes it down to the add_vsi aq command.
12280 static int i40e_add_vsi(struct i40e_vsi *vsi)
12283 struct i40e_pf *pf = vsi->back;
12284 struct i40e_hw *hw = &pf->hw;
12285 struct i40e_vsi_context ctxt;
12286 struct i40e_mac_filter *f;
12287 struct hlist_node *h;
12290 u8 enabled_tc = 0x1; /* TC0 enabled */
12293 memset(&ctxt, 0, sizeof(ctxt));
12294 switch (vsi->type) {
12295 case I40E_VSI_MAIN:
12296 /* The PF's main VSI is already setup as part of the
12297 * device initialization, so we'll not bother with
12298 * the add_vsi call, but we will retrieve the current VSI context */
12301 ctxt.seid = pf->main_vsi_seid;
12302 ctxt.pf_num = pf->hw.pf_id;
12304 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
12305 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12307 dev_info(&pf->pdev->dev,
12308 "couldn't get PF vsi config, err %s aq_err %s\n",
12309 i40e_stat_str(&pf->hw, ret),
12310 i40e_aq_str(&pf->hw,
12311 pf->hw.aq.asq_last_status));
12314 vsi->info = ctxt.info;
12315 vsi->info.valid_sections = 0;
12317 vsi->seid = ctxt.seid;
12318 vsi->id = ctxt.vsi_number;
12320 enabled_tc = i40e_pf_get_tc_map(pf);
12322 /* Source pruning is enabled by default, so the flag is
12323 * negative logic - if it's set, we need to fiddle with
12324 * the VSI to disable source pruning.
12326 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
12327 memset(&ctxt, 0, sizeof(ctxt));
12328 ctxt.seid = pf->main_vsi_seid;
12329 ctxt.pf_num = pf->hw.pf_id;
12331 ctxt.info.valid_sections |=
12332 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12333 ctxt.info.switch_id =
12334 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
12335 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12337 dev_info(&pf->pdev->dev,
12338 "update vsi failed, err %s aq_err %s\n",
12339 i40e_stat_str(&pf->hw, ret),
12340 i40e_aq_str(&pf->hw,
12341 pf->hw.aq.asq_last_status));
12347 /* MFP mode: set up the queue map and update the VSI */
12348 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
12349 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
12350 memset(&ctxt, 0, sizeof(ctxt));
12351 ctxt.seid = pf->main_vsi_seid;
12352 ctxt.pf_num = pf->hw.pf_id;
12354 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
12355 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12357 dev_info(&pf->pdev->dev,
12358 "update vsi failed, err %s aq_err %s\n",
12359 i40e_stat_str(&pf->hw, ret),
12360 i40e_aq_str(&pf->hw,
12361 pf->hw.aq.asq_last_status));
12365 /* update the local VSI info queue map */
12366 i40e_vsi_update_queue_map(vsi, &ctxt);
12367 vsi->info.valid_sections = 0;
12369 /* Default/Main VSI is only enabled for TC0;
12370 * reconfigure it to enable all TCs that are
12371 * available on the port in SFP mode.
12372 * For MFP case the iSCSI PF would use this
12373 * flow to enable LAN+iSCSI TC.
12375 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12377 /* Single TC condition is not fatal,
12378 * log a message and continue
12380 dev_info(&pf->pdev->dev,
12381 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12383 i40e_stat_str(&pf->hw, ret),
12384 i40e_aq_str(&pf->hw,
12385 pf->hw.aq.asq_last_status));
12390 case I40E_VSI_FDIR:
12391 ctxt.pf_num = hw->pf_id;
12393 ctxt.uplink_seid = vsi->uplink_seid;
12394 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12395 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12396 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12397 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12398 ctxt.info.valid_sections |=
12399 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12400 ctxt.info.switch_id =
12401 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12403 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12406 case I40E_VSI_VMDQ2:
12407 ctxt.pf_num = hw->pf_id;
12409 ctxt.uplink_seid = vsi->uplink_seid;
12410 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12411 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12413 /* This VSI is connected to VEB so the switch_id
12414 * should be set to zero by default.
12416 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12417 ctxt.info.valid_sections |=
12418 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12419 ctxt.info.switch_id =
12420 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12423 /* Setup the VSI tx/rx queue map for TC0 only for now */
12424 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12427 case I40E_VSI_SRIOV:
12428 ctxt.pf_num = hw->pf_id;
12429 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12430 ctxt.uplink_seid = vsi->uplink_seid;
12431 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12432 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12434 /* This VSI is connected to VEB so the switch_id
12435 * should be set to zero by default.
12437 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12438 ctxt.info.valid_sections |=
12439 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12440 ctxt.info.switch_id =
12441 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12444 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12445 ctxt.info.valid_sections |=
12446 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12447 ctxt.info.queueing_opt_flags |=
12448 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12449 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12452 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12453 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12454 if (pf->vf[vsi->vf_id].spoofchk) {
12455 ctxt.info.valid_sections |=
12456 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12457 ctxt.info.sec_flags |=
12458 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12459 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12461 /* Setup the VSI tx/rx queue map for TC0 only for now */
12462 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12465 case I40E_VSI_IWARP:
12466 /* send down message to iWARP */
12473 if (vsi->type != I40E_VSI_MAIN) {
12474 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12476 dev_info(&vsi->back->pdev->dev,
12477 "add vsi failed, err %s aq_err %s\n",
12478 i40e_stat_str(&pf->hw, ret),
12479 i40e_aq_str(&pf->hw,
12480 pf->hw.aq.asq_last_status));
12484 vsi->info = ctxt.info;
12485 vsi->info.valid_sections = 0;
12486 vsi->seid = ctxt.seid;
12487 vsi->id = ctxt.vsi_number;
12490 spin_lock_bh(&vsi->mac_filter_hash_lock);
12491 vsi->active_filters = 0;
12492 /* If macvlan filters already exist, force them to get loaded */
12493 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12494 f->state = I40E_FILTER_NEW;
12497 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12498 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12501 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12502 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
12505 /* Update VSI BW information */
12506 ret = i40e_vsi_get_bw_info(vsi);
12508 dev_info(&pf->pdev->dev,
12509 "couldn't get vsi bw info, err %s aq_err %s\n",
12510 i40e_stat_str(&pf->hw, ret),
12511 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12512 /* VSI is already added so not tearing that up */
12521 * i40e_vsi_release - Delete a VSI and free its resources
12522 * @vsi: the VSI being removed
12524 * Returns 0 on success or < 0 on error
12526 int i40e_vsi_release(struct i40e_vsi *vsi)
12528 struct i40e_mac_filter *f;
12529 struct hlist_node *h;
12530 struct i40e_veb *veb = NULL;
12531 struct i40e_pf *pf;
12537 /* release of a VEB-owner or last VSI is not allowed */
12538 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12539 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12540 vsi->seid, vsi->uplink_seid);
12543 if (vsi == pf->vsi[pf->lan_vsi] &&
12544 !test_bit(__I40E_DOWN, pf->state)) {
12545 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12548 set_bit(__I40E_VSI_RELEASING, vsi->state);
12549 uplink_seid = vsi->uplink_seid;
12550 if (vsi->type != I40E_VSI_SRIOV) {
12551 if (vsi->netdev_registered) {
12552 vsi->netdev_registered = false;
12554 /* results in a call to i40e_close() */
12555 unregister_netdev(vsi->netdev);
12558 i40e_vsi_close(vsi);
12560 i40e_vsi_disable_irq(vsi);
12563 spin_lock_bh(&vsi->mac_filter_hash_lock);
12565 /* clear the sync flag on all filters */
12567 __dev_uc_unsync(vsi->netdev, NULL);
12568 __dev_mc_unsync(vsi->netdev, NULL);
12571 /* make sure any remaining filters are marked for deletion */
12572 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12573 __i40e_del_filter(vsi, f);
12575 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12577 i40e_sync_vsi_filters(vsi);
12579 i40e_vsi_delete(vsi);
12580 i40e_vsi_free_q_vectors(vsi);
12582 free_netdev(vsi->netdev);
12583 vsi->netdev = NULL;
12585 i40e_vsi_clear_rings(vsi);
12586 i40e_vsi_clear(vsi);
12588 /* If this was the last thing on the VEB, except for the
12589 * controlling VSI, remove the VEB, which puts the controlling
12590 * VSI onto the next level down in the switch.
12592 * Well, okay, there's one more exception here: don't remove
12593 * the orphan VEBs yet. We'll wait for an explicit remove request
12594 * from up the network stack.
12596 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12598 pf->vsi[i]->uplink_seid == uplink_seid &&
12599 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12600 n++; /* count the VSIs */
12603 for (i = 0; i < I40E_MAX_VEB; i++) {
12606 if (pf->veb[i]->uplink_seid == uplink_seid)
12607 n++; /* count the VEBs */
12608 if (pf->veb[i]->seid == uplink_seid)
12611 if (n == 0 && veb && veb->uplink_seid != 0)
12612 i40e_veb_release(veb);
12618 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12619 * @vsi: ptr to the VSI
12621 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12622 * corresponding SW VSI structure and initializes num_queue_pairs for the
12623 * newly allocated VSI.
12625 * Returns 0 on success or negative on failure
12627 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12630 struct i40e_pf *pf = vsi->back;
12632 if (vsi->q_vectors[0]) {
12633 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12638 if (vsi->base_vector) {
12639 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
12640 vsi->seid, vsi->base_vector);
12644 ret = i40e_vsi_alloc_q_vectors(vsi);
12646 dev_info(&pf->pdev->dev,
12647 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12648 vsi->num_q_vectors, vsi->seid, ret);
12649 vsi->num_q_vectors = 0;
12650 goto vector_setup_out;
12653 /* In Legacy mode, we do not have to get any other vector since we
12654 * piggyback on the misc/ICR0 for queue interrupts.
12656 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12658 if (vsi->num_q_vectors)
12659 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12660 vsi->num_q_vectors, vsi->idx);
12661 if (vsi->base_vector < 0) {
12662 dev_info(&pf->pdev->dev,
12663 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12664 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
12665 i40e_vsi_free_q_vectors(vsi);
12667 goto vector_setup_out;
12675 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
12676 * @vsi: pointer to the vsi.
12678 * This re-allocates a vsi's queue resources.
12680 * Returns pointer to the successfully allocated and configured VSI sw struct
12681 * on success, otherwise returns NULL on failure.
12683 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12685 u16 alloc_queue_pairs;
12686 struct i40e_pf *pf;
12695 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12696 i40e_vsi_clear_rings(vsi);
12698 i40e_vsi_free_arrays(vsi, false);
12699 i40e_set_num_rings_in_vsi(vsi);
12700 ret = i40e_vsi_alloc_arrays(vsi, false);
12704 alloc_queue_pairs = vsi->alloc_queue_pairs *
12705 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12707 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12709 dev_info(&pf->pdev->dev,
12710 "failed to get tracking for %d queues for VSI %d err %d\n",
12711 alloc_queue_pairs, vsi->seid, ret);
12714 vsi->base_queue = ret;
12716 /* Update the FW view of the VSI. Force a reset of TC and queue
12717 * layout configurations.
12719 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12720 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12721 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12722 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
12723 if (vsi->type == I40E_VSI_MAIN)
12724 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
12726 /* assign it some queues */
12727 ret = i40e_alloc_rings(vsi);
12731 /* map all of the rings to the q_vectors */
12732 i40e_vsi_map_rings_to_vectors(vsi);
12736 i40e_vsi_free_q_vectors(vsi);
12737 if (vsi->netdev_registered) {
12738 vsi->netdev_registered = false;
12739 unregister_netdev(vsi->netdev);
12740 free_netdev(vsi->netdev);
12741 vsi->netdev = NULL;
12743 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12745 i40e_vsi_clear(vsi);
12750 * i40e_vsi_setup - Set up a VSI by a given type
12751 * @pf: board private structure
12753 * @uplink_seid: the switch element to link to
12754 * @param1: usage depends upon VSI type. For VF types, indicates VF id
12756 * This allocates the sw VSI structure and its queue resources, then adds a VSI
12757 * to the identified VEB.
12759 * Returns pointer to the successfully allocated and configured VSI sw struct on
12760 * success, otherwise returns NULL on failure.
12762 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
12763 u16 uplink_seid, u32 param1)
12765 struct i40e_vsi *vsi = NULL;
12766 struct i40e_veb *veb = NULL;
12767 u16 alloc_queue_pairs;
12771 /* The requested uplink_seid must be either
12772 * - the PF's port seid
12773 * no VEB is needed because this is the PF
12774 * or this is a Flow Director special case VSI
12775 * - seid of an existing VEB
12776 * - seid of a VSI that owns an existing VEB
12777 * - seid of a VSI that doesn't own a VEB
12778 * a new VEB is created and the VSI becomes the owner
12779 * - seid of the PF VSI, which is what creates the first VEB
12780 * this is a special case of the previous
12782 * Find which uplink_seid we were given and create a new VEB if needed
12784 for (i = 0; i < I40E_MAX_VEB; i++) {
12785 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
12791 if (!veb && uplink_seid != pf->mac_seid) {
12793 for (i = 0; i < pf->num_alloc_vsi; i++) {
12794 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
12800 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
12805 if (vsi->uplink_seid == pf->mac_seid)
12806 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
12807 vsi->tc_config.enabled_tc);
12808 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
12809 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12810 vsi->tc_config.enabled_tc);
12812 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
12813 dev_info(&vsi->back->pdev->dev,
12814 "New VSI creation error, uplink seid of LAN VSI expected.\n");
12817 /* We come up by default in VEPA mode if SRIOV is not
12818 * already enabled, in which case we can't force VEPA
12821 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
12822 veb->bridge_mode = BRIDGE_MODE_VEPA;
12823 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12825 i40e_config_bridge_mode(veb);
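/* If no VEB was created above, the VSI may already be attached to one;
 * look it up by the VSI's uplink seid.
 */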
12827 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12828 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12832 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
12836 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12837 uplink_seid = veb->seid;
12840 /* get vsi sw struct */
12841 v_idx = i40e_vsi_mem_alloc(pf, type);
12844 vsi = pf->vsi[v_idx];
12848 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
12850 if (type == I40E_VSI_MAIN)
12851 pf->lan_vsi = v_idx;
12852 else if (type == I40E_VSI_SRIOV)
12853 vsi->vf_id = param1;
12854 /* assign it some queues */
12855 alloc_queue_pairs = vsi->alloc_queue_pairs *
12856 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12858 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12860 dev_info(&pf->pdev->dev,
12861 "failed to get tracking for %d queues for VSI %d err=%d\n",
12862 alloc_queue_pairs, vsi->seid, ret);
12865 vsi->base_queue = ret;
12867 /* get a VSI from the hardware */
12868 vsi->uplink_seid = uplink_seid;
12869 ret = i40e_add_vsi(vsi);
12873 switch (vsi->type) {
12874 /* setup the netdev if needed */
12875 case I40E_VSI_MAIN:
12876 case I40E_VSI_VMDQ2:
12877 ret = i40e_config_netdev(vsi);
12880 ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
12883 ret = register_netdev(vsi->netdev);
12886 vsi->netdev_registered = true;
12887 netif_carrier_off(vsi->netdev);
12888 #ifdef CONFIG_I40E_DCB
12889 /* Setup DCB netlink interface */
12890 i40e_dcbnl_setup(vsi);
12891 #endif /* CONFIG_I40E_DCB */
12894 case I40E_VSI_FDIR:
12895 /* set up vectors and rings if needed */
12896 ret = i40e_vsi_setup_vectors(vsi);
12900 ret = i40e_alloc_rings(vsi);
12904 /* map all of the rings to the q_vectors */
12905 i40e_vsi_map_rings_to_vectors(vsi);
12907 i40e_vsi_reset_stats(vsi);
12911 /* no netdev or rings for the other VSI types */
12915 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
12916 (vsi->type == I40E_VSI_VMDQ2)) {
12917 ret = i40e_vsi_config_rss(vsi);
12922 i40e_vsi_free_q_vectors(vsi);
12924 if (vsi->netdev_registered) {
12925 vsi->netdev_registered = false;
12926 unregister_netdev(vsi->netdev);
12927 free_netdev(vsi->netdev);
12928 vsi->netdev = NULL;
12931 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12933 i40e_vsi_clear(vsi);
12939 * i40e_veb_get_bw_info - Query VEB BW information
12940 * @veb: the veb to query
12942 * Query the Tx scheduler BW configuration data for given VEB
12944 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
12946 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
12947 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
12948 struct i40e_pf *pf = veb->pf;
12949 struct i40e_hw *hw = &pf->hw;
12954 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
12957 dev_info(&pf->pdev->dev,
12958 "query veb bw config failed, err %s aq_err %s\n",
12959 i40e_stat_str(&pf->hw, ret),
12960 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12964 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
12967 dev_info(&pf->pdev->dev,
12968 "query veb bw ets config failed, err %s aq_err %s\n",
12969 i40e_stat_str(&pf->hw, ret),
12970 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12974 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
12975 veb->bw_max_quanta = ets_data.tc_bw_max;
12976 veb->is_abs_credits = bw_data.absolute_credits_enable;
12977 veb->enabled_tc = ets_data.tc_valid_bits;
12978 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
12979 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
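/* The two 16-bit tc_bw_max words pack one 4-bit max-quanta field per TC;
 * unpack them alongside the per-TC share and limit credits.
 */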
12980 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12981 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
12982 veb->bw_tc_limit_credits[i] =
12983 le16_to_cpu(bw_data.tc_bw_limits[i]);
12984 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
12992 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
12993 * @pf: board private structure
12995 * On error: returns error code (negative)
12996 * On success: returns vsi index in PF (positive)
12998 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
13001 struct i40e_veb *veb;
13004 /* Need to protect the allocation of switch elements at the PF level */
13005 mutex_lock(&pf->switch_mutex);
13007 /* VEB list may be fragmented if VEB creation/destruction has
13008 * been happening. We can afford to do a quick scan to look
13009 * for any free slots in the list.
13011 * find next empty veb slot, looping back around if necessary
13014 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
13016 if (i >= I40E_MAX_VEB) {
13018 goto err_alloc_veb; /* out of VEB slots! */
13021 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
13024 goto err_alloc_veb;
13028 veb->enabled_tc = 1;
13033 mutex_unlock(&pf->switch_mutex);
13038 * i40e_switch_branch_release - Delete a branch of the switch tree
13039 * @branch: where to start deleting
13041 * This uses recursion to find the tips of the branch to be
13042 * removed, deleting until we get back to and can delete this VEB.
13044 static void i40e_switch_branch_release(struct i40e_veb *branch)
13046 struct i40e_pf *pf = branch->pf;
13047 u16 branch_seid = branch->seid;
13048 u16 veb_idx = branch->idx;
13051 /* release any VEBs on this VEB - RECURSION */
13052 for (i = 0; i < I40E_MAX_VEB; i++) {
13055 if (pf->veb[i]->uplink_seid == branch->seid)
13056 i40e_switch_branch_release(pf->veb[i]);
13059 /* Release the VSIs on this VEB, but not the owner VSI.
13061 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
13062 * the VEB itself, so don't use (*branch) after this loop.
13064 for (i = 0; i < pf->num_alloc_vsi; i++) {
13067 if (pf->vsi[i]->uplink_seid == branch_seid &&
13068 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
13069 i40e_vsi_release(pf->vsi[i]);
13073 /* There's one corner case where the VEB might not have been
13074 * removed, so double check it here and remove it if needed.
13075 * This case happens if the veb was created from the debugfs
13076 * commands and no VSIs were added to it.
13078 if (pf->veb[veb_idx])
13079 i40e_veb_release(pf->veb[veb_idx]);
13083 * i40e_veb_clear - remove veb struct
13084 * @veb: the veb to remove
13086 static void i40e_veb_clear(struct i40e_veb *veb)
13092 struct i40e_pf *pf = veb->pf;
13094 mutex_lock(&pf->switch_mutex);
13095 if (pf->veb[veb->idx] == veb)
13096 pf->veb[veb->idx] = NULL;
13097 mutex_unlock(&pf->switch_mutex);
13104 * i40e_veb_release - Delete a VEB and free its resources
13105 * @veb: the VEB being removed
13107 void i40e_veb_release(struct i40e_veb *veb)
13109 struct i40e_vsi *vsi = NULL;
13110 struct i40e_pf *pf;
13115 /* find the remaining VSI and check for extras */
13116 for (i = 0; i < pf->num_alloc_vsi; i++) {
13117 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13123 dev_info(&pf->pdev->dev,
13124 "can't remove VEB %d with %d VSIs left\n",
13129 /* move the remaining VSI to uplink veb */
13130 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13131 if (veb->uplink_seid) {
13132 vsi->uplink_seid = veb->uplink_seid;
13133 if (veb->uplink_seid == pf->mac_seid)
13134 vsi->veb_idx = I40E_NO_VEB;
13136 vsi->veb_idx = veb->veb_idx;
13139 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13140 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
13143 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13144 i40e_veb_clear(veb);
13148 * i40e_add_veb - create the VEB in the switch
13149 * @veb: the VEB to be instantiated
13150 * @vsi: the controlling VSI
13152 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13154 struct i40e_pf *pf = veb->pf;
13155 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13158 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13159 veb->enabled_tc, false,
13160 &veb->seid, enable_stats, NULL);
13162 /* get a VEB from the hardware */
13164 dev_info(&pf->pdev->dev,
13165 "couldn't add VEB, err %s aq_err %s\n",
13166 i40e_stat_str(&pf->hw, ret),
13167 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13171 /* get statistics counter */
13172 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13173 &veb->stats_idx, NULL, NULL, NULL);
13175 dev_info(&pf->pdev->dev,
13176 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13177 i40e_stat_str(&pf->hw, ret),
13178 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13181 ret = i40e_veb_get_bw_info(veb);
13183 dev_info(&pf->pdev->dev,
13184 "couldn't get VEB bw info, err %s aq_err %s\n",
13185 i40e_stat_str(&pf->hw, ret),
13186 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13187 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13191 vsi->uplink_seid = veb->seid;
13192 vsi->veb_idx = veb->idx;
13193 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13199 * i40e_veb_setup - Set up a VEB
13200 * @pf: board private structure
13201 * @flags: VEB setup flags
13202 * @uplink_seid: the switch element to link to
13203 * @vsi_seid: the initial VSI seid
13204 * @enabled_tc: Enabled TC bit-map
13206 * This allocates the sw VEB structure and links it into the switch
13207 * It is possible and legal for this to be a duplicate of an already
13208 * existing VEB. It is also possible for both uplink and vsi seids
13209 * to be zero, in order to create a floating VEB.
13211 * Returns pointer to the successfully allocated VEB sw struct on
13212 * success, otherwise returns NULL on failure.
13214 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
13215 u16 uplink_seid, u16 vsi_seid,
13218 struct i40e_veb *veb, *uplink_veb = NULL;
13219 int vsi_idx, veb_idx;
13222 /* if one seid is 0, the other must be 0 to create a floating relay */
13223 if ((uplink_seid == 0 || vsi_seid == 0) &&
13224 (uplink_seid + vsi_seid != 0)) {
13225 dev_info(&pf->pdev->dev,
13226 "one, not both seid's are 0: uplink=%d vsi=%d\n",
13227 uplink_seid, vsi_seid);
13231 /* make sure there is such a vsi and uplink */
13232 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
13233 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
13235 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
13236 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
13241 if (uplink_seid && uplink_seid != pf->mac_seid) {
13242 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
13243 if (pf->veb[veb_idx] &&
13244 pf->veb[veb_idx]->seid == uplink_seid) {
13245 uplink_veb = pf->veb[veb_idx];
13250 dev_info(&pf->pdev->dev,
13251 "uplink seid %d not found\n", uplink_seid);
13256 /* get veb sw struct */
13257 veb_idx = i40e_veb_mem_alloc(pf);
13260 veb = pf->veb[veb_idx];
13261 veb->flags = flags;
13262 veb->uplink_seid = uplink_seid;
13263 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
13264 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
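/* default to TC0 only when the caller did not request specific TCs */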
13266 /* create the VEB in the switch */
13267 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
13270 if (vsi_idx == pf->lan_vsi)
13271 pf->lan_veb = veb->idx;
13276 i40e_veb_clear(veb);
13282 * i40e_setup_pf_switch_element - set PF vars based on switch type
13283 * @pf: board private structure
13284 * @ele: element we are building info from
13285 * @num_reported: total number of elements
13286 * @printconfig: should we print the contents
13288 * Helper function to extract a few useful SEID values.
13290 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
13291 struct i40e_aqc_switch_config_element_resp *ele,
13292 u16 num_reported, bool printconfig)
13294 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
13295 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
13296 u8 element_type = ele->element_type;
13297 u16 seid = le16_to_cpu(ele->seid);
13300 dev_info(&pf->pdev->dev,
13301 "type=%d seid=%d uplink=%d downlink=%d\n",
13302 element_type, seid, uplink_seid, downlink_seid);
13304 switch (element_type) {
13305 case I40E_SWITCH_ELEMENT_TYPE_MAC:
13306 pf->mac_seid = seid;
13308 case I40E_SWITCH_ELEMENT_TYPE_VEB:
13310 if (uplink_seid != pf->mac_seid)
13312 if (pf->lan_veb == I40E_NO_VEB) {
13315 /* find existing or else empty VEB */
13316 for (v = 0; v < I40E_MAX_VEB; v++) {
13317 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
13322 if (pf->lan_veb == I40E_NO_VEB) {
13323 v = i40e_veb_mem_alloc(pf);
13330 pf->veb[pf->lan_veb]->seid = seid;
13331 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
13332 pf->veb[pf->lan_veb]->pf = pf;
13333 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
13335 case I40E_SWITCH_ELEMENT_TYPE_VSI:
13336 if (num_reported != 1)
13338 /* This is immediately after a reset so we can assume this is
13341 pf->mac_seid = uplink_seid;
13342 pf->pf_seid = downlink_seid;
13343 pf->main_vsi_seid = seid;
13345 dev_info(&pf->pdev->dev,
13346 "pf_seid=%d main_vsi_seid=%d\n",
13347 pf->pf_seid, pf->main_vsi_seid);
13349 case I40E_SWITCH_ELEMENT_TYPE_PF:
13350 case I40E_SWITCH_ELEMENT_TYPE_VF:
13351 case I40E_SWITCH_ELEMENT_TYPE_EMP:
13352 case I40E_SWITCH_ELEMENT_TYPE_BMC:
13353 case I40E_SWITCH_ELEMENT_TYPE_PE:
13354 case I40E_SWITCH_ELEMENT_TYPE_PA:
13355 /* ignore these for now */
13358 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
13359 element_type, seid);
13365 * i40e_fetch_switch_configuration - Get switch config from firmware
13366 * @pf: board private structure
13367 * @printconfig: should we print the contents
13369 * Get the current switch configuration from the device and
13370 * extract a few useful SEID values.
13372 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
13374 struct i40e_aqc_get_switch_config_resp *sw_config;
13380 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
13384 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
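/* The firmware returns the switch configuration in chunks; keep asking,
 * feeding the returned next_seid back in, until it reports no more elements.
 */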
13386 u16 num_reported, num_total;
13388 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
13392 dev_info(&pf->pdev->dev,
13393 "get switch config failed err %s aq_err %s\n",
13394 i40e_stat_str(&pf->hw, ret),
13395 i40e_aq_str(&pf->hw,
13396 pf->hw.aq.asq_last_status));
13401 num_reported = le16_to_cpu(sw_config->header.num_reported);
13402 num_total = le16_to_cpu(sw_config->header.num_total);
13405 dev_info(&pf->pdev->dev,
13406 "header: %d reported %d total\n",
13407 num_reported, num_total);
13409 for (i = 0; i < num_reported; i++) {
13410 struct i40e_aqc_switch_config_element_resp *ele =
13411 &sw_config->element[i];
13413 i40e_setup_pf_switch_element(pf, ele, num_reported,
13416 } while (next_seid != 0);
13423 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
13424 * @pf: board private structure
13425 * @reinit: if the Main VSI needs to be re-initialized.
13427 * Returns 0 on success, negative value on failure
13429 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
13434 /* find out what's out there already */
13435 ret = i40e_fetch_switch_configuration(pf, false);
13437 dev_info(&pf->pdev->dev,
13438 "couldn't fetch switch config, err %s aq_err %s\n",
13439 i40e_stat_str(&pf->hw, ret),
13440 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13443 i40e_pf_reset_stats(pf);
13445 /* set the switch config bit for the whole device to
13446 * support limited promisc or true promisc
13447 * when the user requests promisc. The default is limited
13451 if ((pf->hw.pf_id == 0) &&
13452 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
13453 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13454 pf->last_sw_conf_flags = flags;
13457 if (pf->hw.pf_id == 0) {
13460 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13461 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
13463 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
13464 dev_info(&pf->pdev->dev,
13465 "couldn't set switch config bits, err %s aq_err %s\n",
13466 i40e_stat_str(&pf->hw, ret),
13467 i40e_aq_str(&pf->hw,
13468 pf->hw.aq.asq_last_status));
13469 /* not a fatal problem, just keep going */
13471 pf->last_sw_conf_valid_flags = valid_flags;
13474 /* first time setup */
13475 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
13476 struct i40e_vsi *vsi = NULL;
13479 /* Set up the PF VSI associated with the PF's main VSI
13480 * that is already in the HW switch
13482 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
13483 uplink_seid = pf->veb[pf->lan_veb]->seid;
13485 uplink_seid = pf->mac_seid;
13486 if (pf->lan_vsi == I40E_NO_VSI)
13487 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
13489 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
13491 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
13492 i40e_cloud_filter_exit(pf);
13493 i40e_fdir_teardown(pf);
13497 /* force a reset of TC and queue layout configurations */
13498 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13500 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13501 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13502 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13504 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
13506 i40e_fdir_sb_setup(pf);
13508 /* Setup static PF queue filter control settings */
13509 ret = i40e_setup_pf_filter_control(pf);
13511 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
13513 /* Failure here should not stop continuing other steps */
13516 /* enable RSS in the HW, even for only one queue, as the stack can use
13519 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
13520 i40e_pf_config_rss(pf);
13522 /* fill in link information and enable LSE reporting */
13523 i40e_link_event(pf);
13525 /* Initialize user-specific link properties */
13526 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
13527 I40E_AQ_AN_COMPLETED) ? true : false);
13531 /* repopulate tunnel port filters */
13532 i40e_sync_udp_filters(pf);
13538 * i40e_determine_queue_usage - Work out queue distribution
13539 * @pf: board private structure
13541 static void i40e_determine_queue_usage(struct i40e_pf *pf)
13546 pf->num_lan_qps = 0;
13548 /* Find the max queues to be put into basic use. We'll always be
13549 * using TC0, whether or not DCB is running, and TC0 will get the
13552 queues_left = pf->hw.func_caps.num_tx_qp;
13554 if ((queues_left == 1) ||
13555 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
13556 /* one qp for PF, no queues for anything else */
13558 pf->alloc_rss_size = pf->num_lan_qps = 1;
13560 /* make sure all the fancies are disabled */
13561 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13562 I40E_FLAG_IWARP_ENABLED |
13563 I40E_FLAG_FD_SB_ENABLED |
13564 I40E_FLAG_FD_ATR_ENABLED |
13565 I40E_FLAG_DCB_CAPABLE |
13566 I40E_FLAG_DCB_ENABLED |
13567 I40E_FLAG_SRIOV_ENABLED |
13568 I40E_FLAG_VMDQ_ENABLED);
13569 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13570 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
13571 I40E_FLAG_FD_SB_ENABLED |
13572 I40E_FLAG_FD_ATR_ENABLED |
13573 I40E_FLAG_DCB_CAPABLE))) {
13574 /* one qp for PF */
13575 pf->alloc_rss_size = pf->num_lan_qps = 1;
13576 queues_left -= pf->num_lan_qps;
13578 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13579 I40E_FLAG_IWARP_ENABLED |
13580 I40E_FLAG_FD_SB_ENABLED |
13581 I40E_FLAG_FD_ATR_ENABLED |
13582 I40E_FLAG_DCB_ENABLED |
13583 I40E_FLAG_VMDQ_ENABLED);
13584 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13586 /* Not enough queues for all TCs */
13587 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
13588 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
13589 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
13590 I40E_FLAG_DCB_ENABLED);
13591 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
13594 /* start from the larger of rss_size_max and online CPUs, then cap by Tx queues and MSI-X vectors */
13595 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
13596 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
13597 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
13598 pf->num_lan_qps = q_max;
13600 queues_left -= pf->num_lan_qps;
13603 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13604 if (queues_left > 1) {
13605 queues_left -= 1; /* save 1 queue for FD */
13607 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
13608 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13609 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
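/* Whatever is left after the LAN and Flow Director queues is shared
 * between SR-IOV VFs and VMDq VSIs, each capped by what still fits.
 */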
13613 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13614 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
13615 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
13616 (queues_left / pf->num_vf_qps));
13617 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
13620 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
13621 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
13622 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
13623 (queues_left / pf->num_vmdq_qps));
13624 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
13627 pf->queues_left = queues_left;
13628 dev_dbg(&pf->pdev->dev,
13629 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
13630 pf->hw.func_caps.num_tx_qp,
13631 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
13632 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
13633 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
13638 * i40e_setup_pf_filter_control - Setup PF static filter control
13639 * @pf: PF to be setup
13641 * i40e_setup_pf_filter_control sets up a PF's initial filter control
13642 * settings. If PE/FCoE are enabled then it will also set the per-PF
13643 * filter sizes required for them. It also enables Flow Director,
13644 * ethertype and macvlan type filter settings for the PF.
13646 * Returns 0 on success, negative on failure
13648 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
13650 struct i40e_filter_control_settings *settings = &pf->filter_settings;
13652 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
13654 /* Flow Director is enabled */
13655 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
13656 settings->enable_fdir = true;
13658 /* Ethtype and MACVLAN filters enabled for PF */
13659 settings->enable_ethtype = true;
13660 settings->enable_macvlan = true;
13662 if (i40e_set_filter_control(&pf->hw, settings))
13668 #define INFO_STRING_LEN 255
13669 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
13670 static void i40e_print_features(struct i40e_pf *pf)
13672 struct i40e_hw *hw = &pf->hw;
13676 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
13680 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
13681 #ifdef CONFIG_PCI_IOV
13682 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
13684 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
13685 pf->hw.func_caps.num_vsis,
13686 pf->vsi[pf->lan_vsi]->num_queue_pairs);
13687 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13688 i += snprintf(&buf[i], REMAIN(i), " RSS");
13689 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13690 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
13691 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13692 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13693 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
13695 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13696 i += snprintf(&buf[i], REMAIN(i), " DCB");
13697 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13698 i += snprintf(&buf[i], REMAIN(i), " Geneve");
13699 if (pf->flags & I40E_FLAG_PTP)
13700 i += snprintf(&buf[i], REMAIN(i), " PTP");
13701 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13702 i += snprintf(&buf[i], REMAIN(i), " VEB");
13704 i += snprintf(&buf[i], REMAIN(i), " VEPA");
13706 dev_info(&pf->pdev->dev, "%s\n", buf);
13708 WARN_ON(i > INFO_STRING_LEN);
13712 * i40e_get_platform_mac_addr - get platform-specific MAC address
13713 * @pdev: PCI device information struct
13714 * @pf: board private structure
13716 * Look up the MAC address for the device. First we'll try
13717 * eth_platform_get_mac_address, which will check Open Firmware or an
13718 * arch-specific fallback. Otherwise, we'll default to the stored value in
13721 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
13723 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
13724 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
13728 * i40e_probe - Device initialization routine
13729 * @pdev: PCI device information struct
13730 * @ent: entry in i40e_pci_tbl
13732 * i40e_probe initializes a PF identified by a pci_dev structure.
13733 * The OS initialization, configuring of the PF private structure,
13734 * and a hardware reset occur.
13736 * Returns 0 on success, negative on failure
13738 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13740 struct i40e_aq_get_phy_abilities_resp abilities;
13741 struct i40e_pf *pf;
13742 struct i40e_hw *hw;
13743 static u16 pfs_found;
13750 err = pci_enable_device_mem(pdev);
13754 /* set up for high or low dma */
13755 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
13757 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
13759 dev_err(&pdev->dev,
13760 "DMA configuration failed: 0x%x\n", err);
13765 /* set up pci connections */
13766 err = pci_request_mem_regions(pdev, i40e_driver_name);
13768 dev_info(&pdev->dev,
13769 "pci_request_selected_regions failed %d\n", err);
13773 pci_enable_pcie_error_reporting(pdev);
13774 pci_set_master(pdev);
13776 /* Now that we have a PCI connection, we need to do the
13777 * low level device setup. This is primarily setting up
13778 * the Admin Queue structures and then querying for the
13779 * device's current profile information.
13781 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
13788 set_bit(__I40E_DOWN, pf->state);
13793 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
13794 I40E_MAX_CSR_SPACE);
13796 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
13797 if (!hw->hw_addr) {
13799 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
13800 (unsigned int)pci_resource_start(pdev, 0),
13801 pf->ioremap_len, err);
13804 hw->vendor_id = pdev->vendor;
13805 hw->device_id = pdev->device;
13806 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
13807 hw->subsystem_vendor_id = pdev->subsystem_vendor;
13808 hw->subsystem_device_id = pdev->subsystem_device;
13809 hw->bus.device = PCI_SLOT(pdev->devfn);
13810 hw->bus.func = PCI_FUNC(pdev->devfn);
13811 hw->bus.bus_id = pdev->bus->number;
13812 pf->instance = pfs_found;
13814 /* Select something other than the 802.1ad ethertype for the
13815 * switch to use internally and drop on ingress.
13817 hw->switch_tag = 0xffff;
13818 hw->first_tag = ETH_P_8021AD;
13819 hw->second_tag = ETH_P_8021Q;
13821 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
13822 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
13824 /* set up the locks for the AQ, do this only once in probe
13825 * and destroy them only once in remove
13827 mutex_init(&hw->aq.asq_mutex);
13828 mutex_init(&hw->aq.arq_mutex);
13830 pf->msg_enable = netif_msg_init(debug,
13835 pf->hw.debug_mask = debug;
13837 /* do a special CORER for clearing PXE mode once at init */
13838 if (hw->revision_id == 0 &&
13839 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
13840 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
13845 i40e_clear_pxe_mode(hw);
13848 /* Reset here to make sure all is clean and to define PF 'n' */
13850 err = i40e_pf_reset(hw);
13852 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
13857 hw->aq.num_arq_entries = I40E_AQ_LEN;
13858 hw->aq.num_asq_entries = I40E_AQ_LEN;
13859 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13860 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13861 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
13863 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
13865 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
13867 err = i40e_init_shared_code(hw);
13869 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
13874 /* set up a default setting for link flow control */
13875 pf->hw.fc.requested_mode = I40E_FC_NONE;
13877 err = i40e_init_adminq(hw);
13879 if (err == I40E_ERR_FIRMWARE_API_VERSION)
13880 dev_info(&pdev->dev,
13881 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
13883 dev_info(&pdev->dev,
13884 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
13888 i40e_get_oem_version(hw);
13890 /* provide nvm, fw, api versions */
13891 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
13892 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
13893 hw->aq.api_maj_ver, hw->aq.api_min_ver,
13894 i40e_nvm_version_str(hw));
13896 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
13897 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
13898 dev_info(&pdev->dev,
13899 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
13900 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
13901 dev_info(&pdev->dev,
13902 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
13904 i40e_verify_eeprom(pf);
13906 /* Rev 0 hardware was never productized */
13907 if (hw->revision_id < 1)
13908 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
13910 i40e_clear_pxe_mode(hw);
13911 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
13913 goto err_adminq_setup;
13915 err = i40e_sw_init(pf);
13917 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
13921 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
13922 hw->func_caps.num_rx_qp, 0, 0);
13924 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
13925 goto err_init_lan_hmc;
13928 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
13930 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
13932 goto err_configure_lan_hmc;
13935 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
13936 * Ignore error return codes because if it was already disabled via
13937 * hardware settings this will fail
13939 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
13940 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
13941 i40e_aq_stop_lldp(hw, true, NULL);
13944 /* allow a platform config to override the HW addr */
13945 i40e_get_platform_mac_addr(pdev, pf);
13947 if (!is_valid_ether_addr(hw->mac.addr)) {
13948 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
13952 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
13953 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
13954 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
13955 if (is_valid_ether_addr(hw->mac.port_addr))
13956 pf->hw_features |= I40E_HW_PORT_ID_VALID;
13958 pci_set_drvdata(pdev, pf);
13959 pci_save_state(pdev);
13961 /* Enable FW to write default DCB config on link-up */
13962 i40e_aq_set_dcb_parameters(hw, true, NULL);
13964 #ifdef CONFIG_I40E_DCB
13965 err = i40e_init_pf_dcb(pf);
13967 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
13968 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
13969 /* Continue without DCB enabled */
13971 #endif /* CONFIG_I40E_DCB */
13973 /* set up periodic task facility */
13974 timer_setup(&pf->service_timer, i40e_service_timer, 0);
13975 pf->service_timer_period = HZ;
13977 INIT_WORK(&pf->service_task, i40e_service_task);
13978 clear_bit(__I40E_SERVICE_SCHED, pf->state);
13980 /* NVM bit on means WoL disabled for the port */
13981 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
13982 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
13983 pf->wol_en = false;
13986 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
13988 /* set up the main switch operations */
13989 i40e_determine_queue_usage(pf);
13990 err = i40e_init_interrupt_scheme(pf);
13992 goto err_switch_setup;
13994 /* The number of VSIs reported by the FW is the minimum guaranteed
13995 * to us; HW supports far more and we share the remaining pool with
13996 * the other PFs. We allocate space for more than the guarantee with
13997 * the understanding that we might not get them all later.
13999 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
14000 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
14002 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
14004 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
14005 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
14009 goto err_switch_setup;
14012 #ifdef CONFIG_PCI_IOV
14013 /* prep for VF support */
14014 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14015 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
14016 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
14017 if (pci_num_vf(pdev))
14018 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
14021 err = i40e_setup_pf_switch(pf, false);
14023 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
14026 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
14028 /* if FDIR VSI was set up, start it now */
14029 for (i = 0; i < pf->num_alloc_vsi; i++) {
14030 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
14031 i40e_vsi_open(pf->vsi[i]);
14036 /* The driver only wants link up/down and module qualification
14037 * reports from firmware. Note the negative logic.
14039 err = i40e_aq_set_phy_int_mask(&pf->hw,
14040 ~(I40E_AQ_EVENT_LINK_UPDOWN |
14041 I40E_AQ_EVENT_MEDIA_NA |
14042 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
14044 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
14045 i40e_stat_str(&pf->hw, err),
14046 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14048 /* Reconfigure hardware for allowing smaller MSS in the case
14049 * of TSO, so that we avoid the MDD being fired and causing
14050 * a reset in the case of small MSS+TSO.
14052 val = rd32(hw, I40E_REG_MSS);
14053 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
14054 val &= ~I40E_REG_MSS_MIN_MASK;
14055 val |= I40E_64BYTE_MSS;
14056 wr32(hw, I40E_REG_MSS, val);
14059 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
14061 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
14063 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
14064 i40e_stat_str(&pf->hw, err),
14065 i40e_aq_str(&pf->hw,
14066 pf->hw.aq.asq_last_status));
14068 /* The main driver is (mostly) up and happy. We need to set this state
14069 * before setting up the misc vector or we get a race and the vector
14070 * ends up disabled forever.
14072 clear_bit(__I40E_DOWN, pf->state);
14074 /* In case of MSIX we are going to setup the misc vector right here
14075 * to handle admin queue events etc. In case of legacy and MSI
14076 * the misc functionality and queue processing is combined in
14077 * the same vector and that gets setup at open.
14079 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
14080 err = i40e_setup_misc_vector(pf);
14082 dev_info(&pdev->dev,
14083 "setup of misc vector failed: %d\n", err);
14084 i40e_cloud_filter_exit(pf);
14085 i40e_fdir_teardown(pf);
14090 #ifdef CONFIG_PCI_IOV
14091 /* prep for VF support */
14092 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
14093 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
14094 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
14095 /* disable link interrupts for VFs */
14096 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
14097 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
14098 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
14101 if (pci_num_vf(pdev)) {
14102 dev_info(&pdev->dev,
14103 "Active VFs found, allocating resources.\n");
14104 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
14106 dev_info(&pdev->dev,
14107 "Error %d allocating resources for existing VFs\n",
14111 #endif /* CONFIG_PCI_IOV */
14113 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14114 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
14115 pf->num_iwarp_msix,
14116 I40E_IWARP_IRQ_PILE_ID);
14117 if (pf->iwarp_base_vector < 0) {
14118 dev_info(&pdev->dev,
14119 "failed to get tracking for %d vectors for IWARP err=%d\n",
14120 pf->num_iwarp_msix, pf->iwarp_base_vector);
14121 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
14125 i40e_dbg_pf_init(pf);
14127 /* tell the firmware that we're starting */
14128 i40e_send_version(pf);
14130 /* since everything's happy, start the service_task timer */
14131 mod_timer(&pf->service_timer,
14132 round_jiffies(jiffies + pf->service_timer_period));
14134 /* add this PF to client device list and launch a client service task */
14135 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14136 err = i40e_lan_add_device(pf);
14138 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
14142 #define PCI_SPEED_SIZE 8
14143 #define PCI_WIDTH_SIZE 8
14144 /* Devices on the IOSF bus do not have this information
14145 * and will report PCI Gen 1 x 1 by default so don't bother
14148 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
14149 char speed[PCI_SPEED_SIZE] = "Unknown";
14150 char width[PCI_WIDTH_SIZE] = "Unknown";
14152 /* Get the negotiated link width and speed from PCI config
14155 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
14158 i40e_set_pci_config_data(hw, link_status);
14160 switch (hw->bus.speed) {
14161 case i40e_bus_speed_8000:
14162 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
14163 case i40e_bus_speed_5000:
14164 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
14165 case i40e_bus_speed_2500:
14166 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
14170 switch (hw->bus.width) {
14171 case i40e_bus_width_pcie_x8:
14172 strncpy(width, "8", PCI_WIDTH_SIZE); break;
14173 case i40e_bus_width_pcie_x4:
14174 strncpy(width, "4", PCI_WIDTH_SIZE); break;
14175 case i40e_bus_width_pcie_x2:
14176 strncpy(width, "2", PCI_WIDTH_SIZE); break;
14177 case i40e_bus_width_pcie_x1:
14178 strncpy(width, "1", PCI_WIDTH_SIZE); break;
14183 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
14186 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
14187 hw->bus.speed < i40e_bus_speed_8000) {
14188 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
14189 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
14193 /* get the requested speeds from the fw */
14194 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
14196 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
14197 i40e_stat_str(&pf->hw, err),
14198 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14199 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
14201 /* get the supported phy types from the fw */
14202 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
14204 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
14205 i40e_stat_str(&pf->hw, err),
14206 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14208 /* Add a filter to drop all Flow control frames from any VSI from being
14209 * transmitted. By doing so we stop a malicious VF from sending out
14210 * PAUSE or PFC frames and potentially controlling traffic for other
14212 * The FW can still send Flow control frames if enabled.
14214 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
14215 pf->main_vsi_seid);
14217 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
14218 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
14219 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
14220 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
14221 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
14222 /* print a string summarizing features */
14223 i40e_print_features(pf);
14227 /* Unwind what we've done if something failed in the setup */
14229 set_bit(__I40E_DOWN, pf->state);
14230 i40e_clear_interrupt_scheme(pf);
14233 i40e_reset_interrupt_capability(pf);
14234 del_timer_sync(&pf->service_timer);
14236 err_configure_lan_hmc:
14237 (void)i40e_shutdown_lan_hmc(hw);
14239 kfree(pf->qp_pile);
14243 iounmap(hw->hw_addr);
14247 pci_disable_pcie_error_reporting(pdev);
14248 pci_release_mem_regions(pdev);
14251 pci_disable_device(pdev);
14256 * i40e_remove - Device removal routine
14257 * @pdev: PCI device information struct
14259 * i40e_remove is called by the PCI subsystem to alert the driver
14260 * that it should release a PCI device. This could be caused by a
14261 * Hot-Plug event, or because the driver is going to be removed from
14264 static void i40e_remove(struct pci_dev *pdev)
14266 struct i40e_pf *pf = pci_get_drvdata(pdev);
14267 struct i40e_hw *hw = &pf->hw;
14268 i40e_status ret_code;
14271 i40e_dbg_pf_exit(pf);
14275 /* Disable RSS in hw */
14276 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
14277 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
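/* wait for any in-flight reset recovery to finish before tearing down */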
14279 while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
14280 usleep_range(1000, 2000);
14282 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
14283 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
14285 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
14287 /* no more scheduling of any task */
14288 set_bit(__I40E_SUSPENDED, pf->state);
14289 set_bit(__I40E_DOWN, pf->state);
14290 if (pf->service_timer.function)
14291 del_timer_sync(&pf->service_timer);
14292 if (pf->service_task.func)
14293 cancel_work_sync(&pf->service_task);
14295 /* Client close must be called explicitly here because the timer
14296 * has been stopped.
14298 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14300 i40e_fdir_teardown(pf);
14302 /* If there is a switch structure or any orphans, remove them.
14303 * This will leave only the PF's VSI remaining.
14305 for (i = 0; i < I40E_MAX_VEB; i++) {
14309 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
14310 pf->veb[i]->uplink_seid == 0)
14311 i40e_switch_branch_release(pf->veb[i]);
14314 /* Now we can shutdown the PF's VSI, just before we kill
14317 if (pf->vsi[pf->lan_vsi])
14318 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
14320 i40e_cloud_filter_exit(pf);
14322 /* remove attached clients */
14323 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14324 ret_code = i40e_lan_del_device(pf);
14326 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
14330 /* shutdown and destroy the HMC */
14331 if (hw->hmc.hmc_obj) {
14332 ret_code = i40e_shutdown_lan_hmc(hw);
14334 dev_warn(&pdev->dev,
14335 "Failed to destroy the HMC resources: %d\n",
14339 /* shutdown the adminq */
14340 i40e_shutdown_adminq(hw);
14342 /* destroy the locks only once, here */
14343 mutex_destroy(&hw->aq.arq_mutex);
14344 mutex_destroy(&hw->aq.asq_mutex);
14346 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
14348 i40e_clear_interrupt_scheme(pf);
14349 for (i = 0; i < pf->num_alloc_vsi; i++) {
14351 i40e_vsi_clear_rings(pf->vsi[i]);
14352 i40e_vsi_clear(pf->vsi[i]);
14358 for (i = 0; i < I40E_MAX_VEB; i++) {
14363 kfree(pf->qp_pile);
14366 iounmap(hw->hw_addr);
14368 pci_release_mem_regions(pdev);
14370 pci_disable_pcie_error_reporting(pdev);
14371 pci_disable_device(pdev);
14375 * i40e_pci_error_detected - warning that something funky happened in PCI land
14376 * @pdev: PCI device information struct
14377 * @error: the type of PCI error
14379 * Called to warn that something happened and the error handling steps
14380 * are in progress. Allows the driver to quiesce things, be ready for
14383 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14384 enum pci_channel_state error)
14386 struct i40e_pf *pf = pci_get_drvdata(pdev);
14388 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
14391 dev_info(&pdev->dev,
14392 "Cannot recover - error happened during device probe\n");
14393 return PCI_ERS_RESULT_DISCONNECT;
14396 /* shutdown all operations */
14397 if (!test_bit(__I40E_SUSPENDED, pf->state))
14398 i40e_prep_for_reset(pf, false);
14400 /* Request a slot reset */
14401 return PCI_ERS_RESULT_NEED_RESET;
14405 * i40e_pci_error_slot_reset - a PCI slot reset just happened
14406 * @pdev: PCI device information struct
14408 * Called to find if the driver can work with the device now that
14409 * the pci slot has been reset. If a basic connection seems good
14410 * (registers are readable and have sane content) then return a
14411 * happy little PCI_ERS_RESULT_xxx.
14413 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
14415 struct i40e_pf *pf = pci_get_drvdata(pdev);
14416 pci_ers_result_t result;
14420 dev_dbg(&pdev->dev, "%s\n", __func__);
14421 if (pci_enable_device_mem(pdev)) {
14422 dev_info(&pdev->dev,
14423 "Cannot re-enable PCI device after reset.\n");
14424 result = PCI_ERS_RESULT_DISCONNECT;
14426 pci_set_master(pdev);
14427 pci_restore_state(pdev);
14428 pci_save_state(pdev);
14429 pci_wake_from_d3(pdev, false);
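/* the global reset-trigger register reads back as 0 once the device has
 * come out of reset cleanly
 */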
14431 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
14433 result = PCI_ERS_RESULT_RECOVERED;
14435 result = PCI_ERS_RESULT_DISCONNECT;
14438 err = pci_cleanup_aer_uncorrect_error_status(pdev);
14440 dev_info(&pdev->dev,
14441 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
14443 /* non-fatal, continue */
14450 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
14451 * @pdev: PCI device information struct
14453 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
14455 struct i40e_pf *pf = pci_get_drvdata(pdev);
14457 i40e_prep_for_reset(pf, false);
14461 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
14462 * @pdev: PCI device information struct
14464 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
14466 struct i40e_pf *pf = pci_get_drvdata(pdev);
14468 i40e_reset_and_rebuild(pf, false, false);
14472 * i40e_pci_error_resume - restart operations after PCI error recovery
14473 * @pdev: PCI device information struct
14475 * Called to allow the driver to bring things back up after PCI error
14476 * and/or reset recovery has finished.
14478 static void i40e_pci_error_resume(struct pci_dev *pdev)
14480 struct i40e_pf *pf = pci_get_drvdata(pdev);
14482 dev_dbg(&pdev->dev, "%s\n", __func__);
14483 if (test_bit(__I40E_SUSPENDED, pf->state))
14486 i40e_handle_reset_warning(pf, false);
14490 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
14491 * using the mac_address_write admin q function
14492 * @pf: pointer to i40e_pf struct
14494 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
14496 struct i40e_hw *hw = &pf->hw;
14501 /* Get current MAC address in case it's an LAA */
14502 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
14503 ether_addr_copy(mac_addr,
14504 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
14506 dev_err(&pf->pdev->dev,
14507 "Failed to retrieve MAC address; using default\n");
14508 ether_addr_copy(mac_addr, hw->mac.addr);
14511 /* The FW expects the mac address write cmd to first be called with
14512 * one of these flags before calling it again with the multicast
14515 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
14517 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
14518 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
14520 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14522 dev_err(&pf->pdev->dev,
14523 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
14527 flags = I40E_AQC_MC_MAG_EN
14528 | I40E_AQC_WOL_PRESERVE_ON_PFR
14529 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
14530 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14532 dev_err(&pf->pdev->dev,
14533 "Failed to enable Multicast Magic Packet wake up\n");
14537 * i40e_shutdown - PCI callback for shutting down
14538 * @pdev: PCI device information struct
14540 static void i40e_shutdown(struct pci_dev *pdev)
14542 struct i40e_pf *pf = pci_get_drvdata(pdev);
14543 struct i40e_hw *hw = &pf->hw;
14545 set_bit(__I40E_SUSPENDED, pf->state);
14546 set_bit(__I40E_DOWN, pf->state);
14548 del_timer_sync(&pf->service_timer);
14549 cancel_work_sync(&pf->service_task);
14550 i40e_cloud_filter_exit(pf);
14551 i40e_fdir_teardown(pf);
14553 /* Client close must be called explicitly here because the timer
14554 * has been stopped.
14556 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14558 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14559 i40e_enable_mc_magic_wake(pf);
14561 i40e_prep_for_reset(pf, false);
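/* arm or disarm magic-packet wake in the port power-management registers
 * according to the WoL setting
 */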
14563 wr32(hw, I40E_PFPM_APM,
14564 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14565 wr32(hw, I40E_PFPM_WUFC,
14566 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14568 /* Since we're going to destroy queues during the
14569 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
14573 i40e_clear_interrupt_scheme(pf);
14576 if (system_state == SYSTEM_POWER_OFF) {
14577 pci_wake_from_d3(pdev, pf->wol_en);
14578 pci_set_power_state(pdev, PCI_D3hot);
14583 * i40e_suspend - PM callback for moving to D3
14584 * @dev: generic device information structure
14586 static int __maybe_unused i40e_suspend(struct device *dev)
14588 struct pci_dev *pdev = to_pci_dev(dev);
14589 struct i40e_pf *pf = pci_get_drvdata(pdev);
14590 struct i40e_hw *hw = &pf->hw;
14592 /* If we're already suspended, then there is nothing to do */
14593 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
14596 set_bit(__I40E_DOWN, pf->state);
14598 /* Ensure service task will not be running */
14599 del_timer_sync(&pf->service_timer);
14600 cancel_work_sync(&pf->service_task);
14602 /* Client close must be called explicitly here because the timer
14603 * has been stopped.
14605 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14607 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14608 i40e_enable_mc_magic_wake(pf);
14610 /* Since we're going to destroy queues during the
14611 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
14616 i40e_prep_for_reset(pf, true);
14618 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14619 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14621 /* Clear the interrupt scheme and release our IRQs so that the system
14622 * can safely hibernate even when there are a large number of CPUs.
14623 * Otherwise hibernation might fail when mapping all the vectors back
14626 i40e_clear_interrupt_scheme(pf);
14634 * i40e_resume - PM callback for waking up from D3
14635 * @dev: generic device information structure
14637 static int __maybe_unused i40e_resume(struct device *dev)
14639 struct pci_dev *pdev = to_pci_dev(dev);
14640 struct i40e_pf *pf = pci_get_drvdata(pdev);
14643 /* If we're not suspended, then there is nothing to do */
14644 if (!test_bit(__I40E_SUSPENDED, pf->state))
14647 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
14648 * since we're going to be restoring queues
14652 /* We cleared the interrupt scheme when we suspended, so we need to
14653 * restore it now to resume device functionality.
14655 err = i40e_restore_interrupt_scheme(pf);
14657 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
14661 clear_bit(__I40E_DOWN, pf->state);
14662 i40e_reset_and_rebuild(pf, false, true);
14666 /* Clear suspended state last after everything is recovered */
14667 clear_bit(__I40E_SUSPENDED, pf->state);
14669 /* Restart the service task */
14670 mod_timer(&pf->service_timer,
14671 round_jiffies(jiffies + pf->service_timer_period));
14676 static const struct pci_error_handlers i40e_err_handler = {
14677 .error_detected = i40e_pci_error_detected,
14678 .slot_reset = i40e_pci_error_slot_reset,
14679 .reset_prepare = i40e_pci_error_reset_prepare,
14680 .reset_done = i40e_pci_error_reset_done,
14681 .resume = i40e_pci_error_resume,
14684 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
14686 static struct pci_driver i40e_driver = {
14687 .name = i40e_driver_name,
14688 .id_table = i40e_pci_tbl,
14689 .probe = i40e_probe,
14690 .remove = i40e_remove,
14692 .pm = &i40e_pm_ops,
14694 .shutdown = i40e_shutdown,
14695 .err_handler = &i40e_err_handler,
14696 .sriov_configure = i40e_pci_sriov_configure,
14700 * i40e_init_module - Driver registration routine
14702 * i40e_init_module is the first routine called when the driver is
14703 * loaded. All it does is register with the PCI subsystem.
14705 static int __init i40e_init_module(void)
14707 pr_info("%s: %s - version %s\n", i40e_driver_name,
14708 i40e_driver_string, i40e_driver_version_str);
14709 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
14711 /* There is no need to throttle the number of active tasks because
14712 * each device limits its own task using a state bit for scheduling
14713 * the service task, and the device tasks do not interfere with each
14714 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
14715 * since we need to be able to guarantee forward progress even under
14718 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
14720 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
14725 return pci_register_driver(&i40e_driver);
14727 module_init(i40e_init_module);
14730 * i40e_exit_module - Driver exit cleanup routine
14732 * i40e_exit_module is called just before the driver is removed
14735 static void __exit i40e_exit_module(void)
14737 pci_unregister_driver(&i40e_driver);
14738 destroy_workqueue(i40e_wq);
14741 module_exit(i40e_exit_module);