// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40evf.h"
#include "i40e_prototype.h"
#include "i40evf_client.h"
/* All i40evf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);

char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40evf_wq;
/**
 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
				      struct i40e_dma_mem *mem,
				      u64 size, u32 alignment)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
				       struct i40e_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
				   struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * i40evf_debug_d - OS dependent version of debug printing
 * @hw:  pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}
/**
 * i40evf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void i40evf_schedule_reset(struct i40evf_adapter *adapter)
{
	if (!(adapter->flags &
	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * i40evf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void i40evf_tx_timeout(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	i40evf_schedule_reset(adapter);
}
/**
 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * i40evf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_irq_disable(struct i40evf_adapter *adapter)
{
	int i;
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * i40evf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
	struct i40e_hw *hw = &adapter->hw;

	i40evf_misc_irq_enable(adapter);
	i40evf_irq_enable_queues(adapter, ~0);

	if (flush)
		rd32(hw, I40E_VFGEN_RSTAT);
}
/**
 * i40evf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t i40evf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, I40E_VFINT_ICR01);
	rd32(hw, I40E_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40evf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct i40e_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * i40evf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct i40e_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
/**
 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
		i40evf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
}
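/* i40evf_map_rings_to_vectors() example: with 8 queue pairs on a 4-vector
 * traffic budget, the round-robin above yields vector 0 -> queues {0, 4},
 * vector 1 -> {1, 5}, vector 2 -> {2, 6}, vector 3 -> {3, 7}.
 */
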
/**
 * i40evf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
				       const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}

/**
 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	i40evf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  i40evf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   i40evf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
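/* Naming example (assuming the callers pass netdev->name as basename): a VF
 * netdev "eth1" with combined Tx/Rx ring pairs shows its traffic vectors in
 * /proc/interrupts as "i40evf-eth1-TxRx-0", "i40evf-eth1-TxRx-1", etc.
 */
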
/**
 * i40evf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &i40evf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * i40evf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * i40evf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}
/**
 * i40evf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
	struct i40e_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = I40E_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
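/* Sizing example on a 4K-page system without legacy-rx: a standard MTU
 * (<= ETH_DATA_LEN) selects the 1536-byte buffer minus NET_IP_ALIGN, leaving
 * room in each half page for skb_shared_info; a jumbo MTU selects
 * I40E_RXBUFFER_3072, which requires order-1 pages.
 */
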
/**
 * i40evf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = i40evf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}
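/* Filter changes are deferred: i40evf_add_vlan() only queues the request by
 * setting f->add and I40EVF_FLAG_AQ_ADD_VLAN_FILTER; the watchdog task later
 * sends the actual virtchnl message to the PF. The MAC and cloud filter
 * paths below follow the same pattern.
 */
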
/**
 * i40evf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = i40evf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (i40evf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
				   __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		i40evf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * i40evf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
				      const u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
				     const u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}
/**
 * i40evf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_set_mac(struct net_device *netdev, void *p)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = i40evf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = i40evf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (i40evf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = i40evf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}
/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
	__dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
}
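/* i40evf_set_rx_mode() only requests a promiscuous/allmulti transition when
 * the netdev flag and the adapter's cached state disagree: e.g. IFF_PROMISC
 * newly set while I40EVF_FLAG_PROMISC_ON is clear queues
 * I40EVF_FLAG_AQ_REQUEST_PROMISC, and the watchdog sends the virtchnl
 * request.
 */
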
/**
 * i40evf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * i40evf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * i40evf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void i40evf_configure(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	i40evf_set_rx_mode(netdev);

	i40evf_configure_tx(adapter);
	i40evf_configure_rx(adapter);
	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *ring = &adapter->rx_rings[i];

		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
}

/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * i40evf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
 **/
void i40evf_down(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;
	struct i40evf_cloud_filter *cf;

	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	i40evf_napi_disable_all(adapter);
	i40evf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
/**
 * i40evf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
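/* Note on the call above: on success pci_enable_msix_range() returns the
 * number of vectors actually allocated, which may be fewer than 'vectors'
 * but never below the vector_threshold minimum, so num_msix_vectors can
 * legitimately end up smaller than requested.
 */
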
/**
 * i40evf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * i40evf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did.  Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = I40E_ITR_TX_DEF;
		if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = I40E_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	i40evf_free_queues(adapter);
	return -ENOMEM;
}
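/* Queue count selection example: with no explicit request and no ADq, a VF
 * granted 16 queue pairs on an 8-CPU host gets min(16, 8) = 8 active queues.
 * Requests recorded in num_req_queues (e.g. via ethtool channel changes)
 * take precedence on the next (re)allocation.
 */
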
/**
 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = i40evf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct i40e_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				    adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);

	i40e_flush(hw);

	return 0;
}

/**
 * i40evf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_config_rss(struct i40evf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
					I40EVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return i40evf_config_rss_aq(adapter);
	} else {
		return i40evf_config_rss_reg(adapter);
	}
}
/**
 * i40evf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}
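/* Example: with a 64-entry LUT and 4 active queues, the default table is
 * 0, 1, 2, 3, 0, 1, 2, 3, ... so hashed flows spread evenly across queues.
 */
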
/**
 * i40evf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = I40E_DEFAULT_RSS_HENA;

		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	i40evf_fill_rss_lut(adapter);

	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = i40evf_config_rss(adapter);

	return ret;
}

/**
 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct i40e_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * i40evf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
	int err;

	err = i40evf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = i40evf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = i40evf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
	i40evf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * i40evf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		i40evf_free_traffic_irqs(adapter);
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_q_vectors(adapter);
	i40evf_free_queues(adapter);

	err = i40evf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = i40evf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	i40evf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = i40evf_init_rss(adapter);
err:
	return err;
}

/**
 * i40evf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list containing our adapter
 **/
static void i40evf_watchdog_timer(struct timer_list *t)
{
	struct i40evf_adapter *adapter = from_timer(adapter, t,
						    watchdog_timer);

	schedule_work(&adapter->watchdog_task);
	/* timer will be rescheduled in watchdog task */
}
/**
 * i40evf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void i40evf_watchdog_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      watchdog_task);
	struct i40e_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __I40EVF_STARTUP;
			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __I40EVF_DOWN) ||
	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!i40evf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			i40evf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		i40evf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
		i40evf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
		i40evf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		i40evf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
		i40evf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
		i40evf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
		i40evf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		i40evf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		i40evf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
		i40evf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		i40evf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		i40evf_init_rss(adapter);
		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
		i40evf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
		i40evf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
		i40evf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
		i40evf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
		i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				       FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
		i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		i40evf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
		i40evf_enable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
		i40evf_disable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		i40evf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		i40evf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __I40EVF_RUNNING)
		i40evf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __I40EVF_RUNNING)
		i40evf_detect_recover_hung(&adapter->vsi);
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __I40EVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}
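/* The long if/goto chain above intentionally issues at most one admin queue
 * request per watchdog pass: each handler sets adapter->current_op, and the
 * next pass only proceeds once the completion has cleared it, so requests to
 * the PF are strictly serialized.
 */
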
static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;
	struct i40evf_cloud_filter *cf, *cftmp;

	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __I40EVF_RUNNING) {
		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
		i40evf_irq_disable(adapter);
		i40evf_free_traffic_irqs(adapter);
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	i40evf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
	adapter->state = __I40EVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
 * i40evf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void i40evf_reset_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_cloud_filter *cf;
	struct i40evf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
				    I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
				    I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		i40evf_notify_client_close(&adapter->vsi, true);
	}
	i40evf_misc_irq_disable(adapter);
	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		i40evf_shutdown_adminq(hw);
		i40evf_init_adminq(hw);
		i40evf_request_reset(adapter);
	}
	adapter->flags |= I40EVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(I40EVF_RESET_WAIT_MS);

		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		i40evf_disable_vf(adapter);
		clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __I40EVF_RUNNING) ||
		   (adapter->state == __I40EVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
	}
	i40evf_irq_disable(adapter);

	adapter->state = __I40EVF_RESETTING;
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_all_tx_resources(adapter);

	adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	i40evf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = i40evf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
		err = i40evf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
	i40evf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = i40evf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = i40evf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
			err = i40evf_request_traffic_irqs(adapter,
							  netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
		}

		i40evf_configure(adapter);

		i40evf_up_complete(adapter);

		i40evf_irq_enable(adapter, true);
	} else {
		adapter->state = __I40EVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(netdev);
}
2019 * i40evf_adminq_task - worker thread to clean the admin queue
2020 * @work: pointer to work_struct containing our data
2022 static void i40evf_adminq_task(struct work_struct *work)
2024 struct i40evf_adapter *adapter =
2025 container_of(work, struct i40evf_adapter, adminq_task);
2026 struct i40e_hw *hw = &adapter->hw;
2027 struct i40e_arq_event_info event;
2028 enum virtchnl_ops v_op;
2029 i40e_status ret, v_ret;
2033 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
2036 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
2037 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2042 ret = i40evf_clean_arq_element(hw, &event, &pending);
2043 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2044 v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
2047 break; /* No event to process or error cleaning ARQ */
2049 i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2052 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
2055 if ((adapter->flags &
2056 (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
2057 adapter->state == __I40EVF_RESETTING)
2060 /* check for error indications */
2061 val = rd32(hw, hw->aq.arq.len);
2062 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
2065 if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2066 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2067 val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2069 if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2070 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2071 val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2073 if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2074 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2075 val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2078 wr32(hw, hw->aq.arq.len, val);
2080 val = rd32(hw, hw->aq.asq.len);
2082 if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2083 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2084 val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2086 if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2087 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2088 val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2090 if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2091 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2092 val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2095 wr32(hw, hw->aq.asq.len, val);
2098 kfree(event.msg_buf);
2100 /* re-enable Admin queue interrupt cause */
2101 i40evf_misc_irq_enable(adapter);
2105 * i40evf_client_task - worker thread to perform client work
2106 * @work: pointer to work_struct containing our data
2108 * This task handles client interactions. Because client calls can be
2109 * reentrant, we can't handle them in the watchdog.
2111 static void i40evf_client_task(struct work_struct *work)
2113 struct i40evf_adapter *adapter =
2114 container_of(work, struct i40evf_adapter, client_task.work);
2116 /* If we can't get the client bit, just give up. We'll be rescheduled
2120 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2123 if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2124 i40evf_client_subtask(adapter);
2125 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2128 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2129 i40evf_notify_client_l2_params(&adapter->vsi);
2130 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2133 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2134 i40evf_notify_client_close(&adapter->vsi, false);
2135 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2138 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2139 i40evf_notify_client_open(&adapter->vsi);
2140 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2143 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2147 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2148 * @adapter: board private structure
2150 * Free all transmit software resources
2152 void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
2156 if (!adapter->tx_rings)
2159 for (i = 0; i < adapter->num_active_queues; i++)
2160 if (adapter->tx_rings[i].desc)
2161 i40evf_free_tx_resources(&adapter->tx_rings[i]);
2165 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2166 * @adapter: board private structure
2168 * If this function returns with an error, then it's possible one or
2169 * more of the rings is populated (while the rest are not). It is the
2170 * caller's duty to clean those orphaned rings.
2172 * Return 0 on success, negative on failure
2174 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2178 for (i = 0; i < adapter->num_active_queues; i++) {
2179 adapter->tx_rings[i].count = adapter->tx_desc_count;
2180 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
2183 dev_err(&adapter->pdev->dev,
2184 "Allocation for Tx Queue %u failed\n", i);
2192 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2193 * @adapter: board private structure
2195 * If this function returns with an error, then it's possible one or
2196 * more of the rings is populated (while the rest are not). It is the
2197 * caller's duty to clean those orphaned rings.
2199 * Return 0 on success, negative on failure
2201 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2205 for (i = 0; i < adapter->num_active_queues; i++) {
2206 adapter->rx_rings[i].count = adapter->rx_desc_count;
2207 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
2210 dev_err(&adapter->pdev->dev,
2211 "Allocation for Rx Queue %u failed\n", i);
2218 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2219 * @adapter: board private structure
2221 * Free all receive software resources
2223 void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
2227 if (!adapter->rx_rings)
2230 for (i = 0; i < adapter->num_active_queues; i++)
2231 if (adapter->rx_rings[i].desc)
2232 i40evf_free_rx_resources(&adapter->rx_rings[i]);
2236 * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
2237 * @adapter: board private structure
2238 * @max_tx_rate: max Tx bw for a tc
2240 static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
2243 int speed = 0, ret = 0;
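/* Map the negotiated link speed to a ceiling in Mbps; the caller passes
* max_tx_rate already converted to Mbps, so the two compare directly.
*/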
2245 switch (adapter->link_speed) {
2246 case I40E_LINK_SPEED_40GB:
2249 case I40E_LINK_SPEED_25GB:
2252 case I40E_LINK_SPEED_20GB:
2255 case I40E_LINK_SPEED_10GB:
2258 case I40E_LINK_SPEED_1GB:
2261 case I40E_LINK_SPEED_100MB:
2268 if (max_tx_rate > speed) {
2269 dev_err(&adapter->pdev->dev,
2270 "Invalid tx rate specified\n");
2278 * i40evf_validate_ch_config - validate queue mapping info
2279 * @adapter: board private structure
2280 * @mqprio_qopt: queue parameters
2282 * This function validates if the config provided by the user to
2283 * configure queue channels is valid or not. Returns 0 on a valid config.
2286 static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
2287 struct tc_mqprio_qopt_offload *mqprio_qopt)
2289 u64 total_max_rate = 0;
2294 if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
2295 mqprio_qopt->qopt.num_tc < 1)
2298 for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
2299 if (!mqprio_qopt->qopt.count[i] ||
2300 mqprio_qopt->qopt.offset[i] != num_qps)
2302 if (mqprio_qopt->min_rate[i]) {
2303 dev_err(&adapter->pdev->dev,
2304 "Invalid min tx rate (greater than 0) specified\n");
2307 /* convert to Mbps */
2308 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2309 I40EVF_MBPS_DIVISOR);
2310 total_max_rate += tx_rate;
2311 num_qps += mqprio_qopt->qopt.count[i];
2313 if (num_qps > I40EVF_MAX_REQ_QUEUES)
2316 ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
2321 * i40evf_del_all_cloud_filters - delete all cloud filters
2322 * on the traffic classes
* @adapter: board private structure
2324 static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
2326 struct i40evf_cloud_filter *cf, *cftmp;
2328 spin_lock_bh(&adapter->cloud_filter_list_lock);
2329 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2331 list_del(&cf->list);
2333 adapter->num_cloud_filters--;
2335 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2339 * __i40evf_setup_tc - configure multiple traffic classes
2340 * @netdev: network interface device structure
2341 * @type_data: tc offload data
2343 * This function processes the config information provided by the
2344 * user to configure traffic classes/queue channels and packages the
2345 * information to request the PF to setup traffic classes.
2347 * Returns 0 on success.
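*
* Illustrative userspace invocation that reaches this path (interface
* name and queue split are hypothetical examples):
*   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
*      queues 4@0 4@4 hw 1 mode channel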
2349 static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
2351 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2352 struct i40evf_adapter *adapter = netdev_priv(netdev);
2353 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2354 u8 num_tc = 0, total_qps = 0;
2355 int ret = 0, netdev_tc = 0;
2360 num_tc = mqprio_qopt->qopt.num_tc;
2361 mode = mqprio_qopt->mode;
2363 /* delete queue_channel */
2364 if (!mqprio_qopt->qopt.hw) {
2365 if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
2366 /* reset the tc configuration */
2367 netdev_reset_tc(netdev);
2368 adapter->num_tc = 0;
2369 netif_tx_stop_all_queues(netdev);
2370 netif_tx_disable(netdev);
2371 i40evf_del_all_cloud_filters(adapter);
2372 adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
2379 /* add queue channel */
2380 if (mode == TC_MQPRIO_MODE_CHANNEL) {
2381 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2382 dev_err(&adapter->pdev->dev, "ADq not supported\n");
2385 if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
2386 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2390 ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
2393 /* Return if same TC config is requested */
2394 if (adapter->num_tc == num_tc)
2396 adapter->num_tc = num_tc;
2398 for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
2400 adapter->ch_config.ch_info[i].count =
2401 mqprio_qopt->qopt.count[i];
2402 adapter->ch_config.ch_info[i].offset =
2403 mqprio_qopt->qopt.offset[i];
2404 total_qps += mqprio_qopt->qopt.count[i];
2405 max_tx_rate = mqprio_qopt->max_rate[i];
2406 /* convert to Mbps */
2407 max_tx_rate = div_u64(max_tx_rate,
2408 I40EVF_MBPS_DIVISOR);
2409 adapter->ch_config.ch_info[i].max_tx_rate =
2412 adapter->ch_config.ch_info[i].count = 1;
2413 adapter->ch_config.ch_info[i].offset = 0;
2416 adapter->ch_config.total_qps = total_qps;
2417 netif_tx_stop_all_queues(netdev);
2418 netif_tx_disable(netdev);
2419 adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
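/* The ENABLE_CHANNELS request itself goes out later, when the pending
* bits in adapter->aq_required are serviced by the watchdog task.
*/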
2420 netdev_reset_tc(netdev);
2421 /* Report the tc mapping up the stack */
2422 netdev_set_num_tc(adapter->netdev, num_tc);
2423 for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
2424 u16 qcount = mqprio_qopt->qopt.count[i];
2425 u16 qoffset = mqprio_qopt->qopt.offset[i];
2428 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2437 * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
2438 * @adapter: board private structure
2439 * @f: pointer to struct tc_cls_flower_offload
2440 * @filter: pointer to cloud filter structure
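*
* Illustrative tc flower command that exercises this parser (interface
* name, address and port are hypothetical examples):
*   tc filter add dev eth0 protocol ip ingress flower dst_ip 192.168.1.10 \
*      ip_proto tcp dst_port 80 skip_sw hw_tc 1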
2442 static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
2443 struct tc_cls_flower_offload *f,
2444 struct i40evf_cloud_filter *filter)
2446 u16 n_proto_mask = 0;
2447 u16 n_proto_key = 0;
2452 struct virtchnl_filter *vf = &filter->f;
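/* Reject the filter outright if it uses any dissector key that cannot
* be expressed in a virtchnl cloud filter.
*/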
2454 if (f->dissector->used_keys &
2455 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2456 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2457 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2458 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2459 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2460 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2461 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2462 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2463 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2464 f->dissector->used_keys);
2468 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2469 struct flow_dissector_key_keyid *mask =
2470 skb_flow_dissector_target(f->dissector,
2471 FLOW_DISSECTOR_KEY_ENC_KEYID,
2474 if (mask->keyid != 0)
2475 field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
2478 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
2479 struct flow_dissector_key_basic *key =
2480 skb_flow_dissector_target(f->dissector,
2481 FLOW_DISSECTOR_KEY_BASIC,
2484 struct flow_dissector_key_basic *mask =
2485 skb_flow_dissector_target(f->dissector,
2486 FLOW_DISSECTOR_KEY_BASIC,
2488 n_proto_key = ntohs(key->n_proto);
2489 n_proto_mask = ntohs(mask->n_proto);
2491 if (n_proto_key == ETH_P_ALL) {
2495 n_proto = n_proto_key & n_proto_mask;
2496 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2498 if (n_proto == ETH_P_IPV6) {
2499 /* specify flow type as TCP IPv6 */
2500 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2503 if (key->ip_proto != IPPROTO_TCP) {
2504 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2509 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2510 struct flow_dissector_key_eth_addrs *key =
2511 skb_flow_dissector_target(f->dissector,
2512 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2515 struct flow_dissector_key_eth_addrs *mask =
2516 skb_flow_dissector_target(f->dissector,
2517 FLOW_DISSECTOR_KEY_ETH_ADDRS,
2519 /* use is_broadcast and is_zero to check for all 0xff or 0 */
2520 if (!is_zero_ether_addr(mask->dst)) {
2521 if (is_broadcast_ether_addr(mask->dst)) {
2522 field_flags |= I40EVF_CLOUD_FIELD_OMAC;
2524 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2526 return I40E_ERR_CONFIG;
2530 if (!is_zero_ether_addr(mask->src)) {
2531 if (is_broadcast_ether_addr(mask->src)) {
2532 field_flags |= I40EVF_CLOUD_FIELD_IMAC;
2534 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2536 return I40E_ERR_CONFIG;
2540 if (!is_zero_ether_addr(key->dst))
2541 if (is_valid_ether_addr(key->dst) ||
2542 is_multicast_ether_addr(key->dst)) {
2543 /* set the mask if a valid dst_mac address */
2544 for (i = 0; i < ETH_ALEN; i++)
2545 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2546 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2550 if (!is_zero_ether_addr(key->src))
2551 if (is_valid_ether_addr(key->src) ||
2552 is_multicast_ether_addr(key->src)) {
2553 /* set the mask if a valid src_mac address */
2554 for (i = 0; i < ETH_ALEN; i++)
2555 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2556 ether_addr_copy(vf->data.tcp_spec.src_mac,
2561 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
2562 struct flow_dissector_key_vlan *key =
2563 skb_flow_dissector_target(f->dissector,
2564 FLOW_DISSECTOR_KEY_VLAN,
2566 struct flow_dissector_key_vlan *mask =
2567 skb_flow_dissector_target(f->dissector,
2568 FLOW_DISSECTOR_KEY_VLAN,
2571 if (mask->vlan_id) {
2572 if (mask->vlan_id == VLAN_VID_MASK) {
2573 field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
2575 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2577 return I40E_ERR_CONFIG;
2580 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2581 vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
2584 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
2585 struct flow_dissector_key_control *key =
2586 skb_flow_dissector_target(f->dissector,
2587 FLOW_DISSECTOR_KEY_CONTROL,
2590 addr_type = key->addr_type;
2593 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2594 struct flow_dissector_key_ipv4_addrs *key =
2595 skb_flow_dissector_target(f->dissector,
2596 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2598 struct flow_dissector_key_ipv4_addrs *mask =
2599 skb_flow_dissector_target(f->dissector,
2600 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
2604 if (mask->dst == cpu_to_be32(0xffffffff)) {
2605 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2607 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2608 be32_to_cpu(mask->dst));
2609 return I40E_ERR_CONFIG;
2614 if (mask->src == cpu_to_be32(0xffffffff)) {
2615 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2617 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2618 be32_to_cpu(mask->src));
2619 return I40E_ERR_CONFIG;
2623 if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
2624 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2625 return I40E_ERR_CONFIG;
2628 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2629 vf->data.tcp_spec.dst_ip[0] = key->dst;
2632 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2633 vf->data.tcp_spec.src_ip[0] = key->src;
2637 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2638 struct flow_dissector_key_ipv6_addrs *key =
2639 skb_flow_dissector_target(f->dissector,
2640 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2642 struct flow_dissector_key_ipv6_addrs *mask =
2643 skb_flow_dissector_target(f->dissector,
2644 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
2647 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2648 if (ipv6_addr_any(&mask->dst)) {
2649 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2651 return I40E_ERR_CONFIG;
2654 /* src and dest IPv6 addresses should not be LOOPBACK
2655 * (0:0:0:0:0:0:0:1) which can be represented as ::1
2657 if (ipv6_addr_loopback(&key->dst) ||
2658 ipv6_addr_loopback(&key->src)) {
2659 dev_err(&adapter->pdev->dev,
2660 "ipv6 addr should not be loopback\n");
2661 return I40E_ERR_CONFIG;
2663 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
2664 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2666 for (i = 0; i < 4; i++)
2667 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2668 memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
2669 sizeof(vf->data.tcp_spec.dst_ip));
2670 for (i = 0; i < 4; i++)
2671 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2672 memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
2673 sizeof(vf->data.tcp_spec.src_ip));
2675 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
2676 struct flow_dissector_key_ports *key =
2677 skb_flow_dissector_target(f->dissector,
2678 FLOW_DISSECTOR_KEY_PORTS,
2680 struct flow_dissector_key_ports *mask =
2681 skb_flow_dissector_target(f->dissector,
2682 FLOW_DISSECTOR_KEY_PORTS,
2686 if (mask->src == cpu_to_be16(0xffff)) {
2687 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2689 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2690 be16_to_cpu(mask->src));
2691 return I40E_ERR_CONFIG;
2696 if (mask->dst == cpu_to_be16(0xffff)) {
2697 field_flags |= I40EVF_CLOUD_FIELD_IIP;
2699 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2700 be16_to_cpu(mask->dst));
2701 return I40E_ERR_CONFIG;
2705 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2706 vf->data.tcp_spec.dst_port = key->dst;
2710 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2711 vf->data.tcp_spec.src_port = key->src;
2714 vf->field_flags = field_flags;
2720 * i40evf_handle_tclass - Forward to a traffic class on the device
2721 * @adapter: board private structure
2722 * @tc: traffic class index on the device
2723 * @filter: pointer to cloud filter structure
2725 static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
2726 struct i40evf_cloud_filter *filter)
2730 if (tc < adapter->num_tc) {
2731 if (!filter->f.data.tcp_spec.dst_port) {
2732 dev_err(&adapter->pdev->dev,
2733 "Specify destination port to redirect to traffic class other than TC0\n");
2737 /* redirect to a traffic class on the same device */
2738 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
2739 filter->f.action_meta = tc;
2744 * i40evf_configure_clsflower - Add tc flower filters
2745 * @adapter: board private structure
2746 * @cls_flower: Pointer to struct tc_cls_flower_offload
2748 static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
2749 struct tc_cls_flower_offload *cls_flower)
2751 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2752 struct i40evf_cloud_filter *filter = NULL;
2753 int err = -EINVAL, count = 50;
2756 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
2760 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2764 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
2765 &adapter->crit_section)) {
2771 filter->cookie = cls_flower->cookie;
2773 /* set the mask to all zeroes to begin with */
2774 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
2775 /* start out with flow type and eth type IPv4 */
2776 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
2777 err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
2781 err = i40evf_handle_tclass(adapter, tc, filter);
2785 /* add filter to the list */
2786 spin_lock_bh(&adapter->cloud_filter_list_lock);
2787 list_add_tail(&filter->list, &adapter->cloud_filter_list);
2788 adapter->num_cloud_filters++;
2790 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
2791 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2796 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
2800 /* i40evf_find_cf - Find the cloud filter in the list
2801 * @adapter: Board private structure
2802 * @cookie: filter specific cookie
2804 * Returns ptr to the filter object or NULL. Must be called while holding the
2805 * cloud_filter_list_lock.
2807 static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
2808 unsigned long *cookie)
2810 struct i40evf_cloud_filter *filter = NULL;
2815 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
2816 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
2823 * i40evf_delete_clsflower - Remove tc flower filters
2824 * @adapter: board private structure
2825 * @cls_flower: Pointer to struct tc_cls_flower_offload
2827 static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
2828 struct tc_cls_flower_offload *cls_flower)
2830 struct i40evf_cloud_filter *filter = NULL;
2833 spin_lock_bh(&adapter->cloud_filter_list_lock);
2834 filter = i40evf_find_cf(adapter, &cls_flower->cookie);
2837 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
2841 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2847 * i40evf_setup_tc_cls_flower - flower classifier offloads
2848 * @adapter: board private structure
2849 * @cls_flower: pointer to struct tc_cls_flower_offload
2851 static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
2852 struct tc_cls_flower_offload *cls_flower)
2854 if (cls_flower->common.chain_index)
2857 switch (cls_flower->command) {
2858 case TC_CLSFLOWER_REPLACE:
2859 return i40evf_configure_clsflower(adapter, cls_flower);
2860 case TC_CLSFLOWER_DESTROY:
2861 return i40evf_delete_clsflower(adapter, cls_flower);
2862 case TC_CLSFLOWER_STATS:
2870 * i40evf_setup_tc_block_cb - block callback for tc
2871 * @type: type of offload
2872 * @type_data: offload data
* @cb_priv: board private structure, passed back to the callback
2875 * This function is the block callback for traffic classes
2877 static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2881 case TC_SETUP_CLSFLOWER:
2882 return i40evf_setup_tc_cls_flower(cb_priv, type_data);
2889 * i40evf_setup_tc_block - register callbacks for tc
2890 * @dev: network interface device structure
2891 * @f: tc offload data
2893 * This function registers block callbacks for tc
2896 static int i40evf_setup_tc_block(struct net_device *dev,
2897 struct tc_block_offload *f)
2899 struct i40evf_adapter *adapter = netdev_priv(dev);
2901 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
2904 switch (f->command) {
case TC_BLOCK_BIND:
2906 return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
2907 adapter, adapter, f->extack);
2908 case TC_BLOCK_UNBIND:
2909 tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
2918 * i40evf_setup_tc - configure multiple traffic classes
2919 * @netdev: network interface device structure
2920 * @type: type of offload
2921 * @type_data: tc offload data
2923 * This function is the callback to ndo_setup_tc in the netdev_ops.
2926 * Returns 0 on success
2928 static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
2932 case TC_SETUP_QDISC_MQPRIO:
2933 return __i40evf_setup_tc(netdev, type_data);
2934 case TC_SETUP_BLOCK:
2935 return i40evf_setup_tc_block(netdev, type_data);
2942 * i40evf_open - Called when a network interface is made active
2943 * @netdev: network interface device structure
2945 * Returns 0 on success, negative value on failure
2947 * The open entry point is called when a network interface is made
2948 * active by the system (IFF_UP). At this point all resources needed
2949 * for transmit and receive operations are allocated, the interrupt
2950 * handler is registered with the OS, the watchdog timer is started,
2951 * and the stack is notified that the interface is ready.
2953 static int i40evf_open(struct net_device *netdev)
2955 struct i40evf_adapter *adapter = netdev_priv(netdev);
2958 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2959 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2963 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
2964 &adapter->crit_section))
2965 usleep_range(500, 1000);
2967 if (adapter->state != __I40EVF_DOWN) {
2972 /* allocate transmit descriptors */
2973 err = i40evf_setup_all_tx_resources(adapter);
2977 /* allocate receive descriptors */
2978 err = i40evf_setup_all_rx_resources(adapter);
2982 /* clear any pending interrupts, may auto mask */
2983 err = i40evf_request_traffic_irqs(adapter, netdev->name);
2987 spin_lock_bh(&adapter->mac_vlan_list_lock);
2989 i40evf_add_filter(adapter, adapter->hw.mac.addr);
2991 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2993 i40evf_configure(adapter);
2995 i40evf_up_complete(adapter);
2997 i40evf_irq_enable(adapter, true);
2999 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3004 i40evf_down(adapter);
3005 i40evf_free_traffic_irqs(adapter);
3007 i40evf_free_all_rx_resources(adapter);
3009 i40evf_free_all_tx_resources(adapter);
3011 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3017 * i40evf_close - Disables a network interface
3018 * @netdev: network interface device structure
3020 * Returns 0, this is not allowed to fail
3022 * The close entry point is called when an interface is de-activated
3023 * by the OS. The hardware is still under the driver's control, but
3024 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3025 * are freed, along with all transmit and receive resources.
3027 static int i40evf_close(struct net_device *netdev)
3029 struct i40evf_adapter *adapter = netdev_priv(netdev);
3032 if (adapter->state <= __I40EVF_DOWN_PENDING)
3035 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
3036 &adapter->crit_section))
3037 usleep_range(500, 1000);
3039 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
3040 if (CLIENT_ENABLED(adapter))
3041 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
3043 i40evf_down(adapter);
3044 adapter->state = __I40EVF_DOWN_PENDING;
3045 i40evf_free_traffic_irqs(adapter);
3047 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3049 /* We explicitly don't free resources here because the hardware is
3050 * still active and can DMA into memory. Resources are cleared in
3051 * i40evf_virtchnl_completion() after we get confirmation from the PF
3052 * driver that the rings have been stopped.
3054 * Also, we wait for state to transition to __I40EVF_DOWN before
3055 * returning. State change occurs in i40evf_virtchnl_completion() after
3056 * VF resources are released (which occurs after PF driver processes and
3057 * responds to admin queue commands).
3060 status = wait_event_timeout(adapter->down_waitqueue,
3061 adapter->state == __I40EVF_DOWN,
3062 msecs_to_jiffies(200));
3064 netdev_warn(netdev, "Device resources not yet released\n");
3069 * i40evf_change_mtu - Change the Maximum Transfer Unit
3070 * @netdev: network interface device structure
3071 * @new_mtu: new value for maximum frame size
3073 * Returns 0 on success, negative on failure
3075 static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
3077 struct i40evf_adapter *adapter = netdev_priv(netdev);
3079 netdev->mtu = new_mtu;
3080 if (CLIENT_ENABLED(adapter)) {
3081 i40evf_notify_client_l2_params(&adapter->vsi);
3082 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
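/* The rings must be rebuilt for the new MTU, so request a full reset
* instead of trying to resize buffers in place.
*/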
3084 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
3085 schedule_work(&adapter->reset_task);
3091 * i40evf_set_features - set the netdev feature flags
3092 * @netdev: ptr to the netdev being adjusted
3093 * @features: the feature set that the stack is suggesting
3094 * Note: expects to be called while under rtnl_lock()
3096 static int i40evf_set_features(struct net_device *netdev,
3097 netdev_features_t features)
3099 struct i40evf_adapter *adapter = netdev_priv(netdev);
3101 /* Don't allow changing VLAN_RX flag when adapter is not capable of VLAN offload */
3104 if (!VLAN_ALLOWED(adapter)) {
3105 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3107 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3108 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3109 adapter->aq_required |=
3110 I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3112 adapter->aq_required |=
3113 I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3120 * i40evf_features_check - Validate encapsulated packet conforms to limits
3122 * @dev: This physical port's netdev
3123 * @features: Offload features that the stack believes apply
3125 static netdev_features_t i40evf_features_check(struct sk_buff *skb,
3126 struct net_device *dev,
3127 netdev_features_t features)
3131 /* No point in doing any of this if neither checksum nor GSO are
3132 * being requested for this frame. We can rule out both by just
3133 * checking for CHECKSUM_PARTIAL
3135 if (skb->ip_summed != CHECKSUM_PARTIAL)
3138 /* We cannot support GSO if the MSS is going to be less than
3139 * 64 bytes. If it is then we need to drop support for GSO.
3141 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3142 features &= ~NETIF_F_GSO_MASK;
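/* Each header-length check below masks off the legal range: a nonzero
* result means the length exceeds the hardware field's maximum or is
* not a whole number of 2-byte words (or 4-byte dwords).
*/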
3144 /* MACLEN can support at most 63 words */
3145 len = skb_network_header(skb) - skb->data;
3146 if (len & ~(63 * 2))
3149 /* IPLEN and EIPLEN can support at most 127 dwords */
3150 len = skb_transport_header(skb) - skb_network_header(skb);
3151 if (len & ~(127 * 4))
3154 if (skb->encapsulation) {
3155 /* L4TUNLEN can support 127 words */
3156 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3157 if (len & ~(127 * 2))
3160 /* IPLEN can support at most 127 dwords */
3161 len = skb_inner_transport_header(skb) -
3162 skb_inner_network_header(skb);
3163 if (len & ~(127 * 4))
3167 /* No need to validate L4LEN as TCP is the only protocol with
3168 * a flexible value and we support all possible values supported
3169 * by TCP, which is at most 15 dwords
3174 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3178 * i40evf_fix_features - fix up the netdev feature bits
3179 * @netdev: our net device
3180 * @features: desired feature bits
3182 * Returns fixed-up features bits
3184 static netdev_features_t i40evf_fix_features(struct net_device *netdev,
3185 netdev_features_t features)
3187 struct i40evf_adapter *adapter = netdev_priv(netdev);
3189 if (adapter->vf_res &&
3190 !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3191 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3192 NETIF_F_HW_VLAN_CTAG_RX |
3193 NETIF_F_HW_VLAN_CTAG_FILTER);
3198 static const struct net_device_ops i40evf_netdev_ops = {
3199 .ndo_open = i40evf_open,
3200 .ndo_stop = i40evf_close,
3201 .ndo_start_xmit = i40evf_xmit_frame,
3202 .ndo_set_rx_mode = i40evf_set_rx_mode,
3203 .ndo_validate_addr = eth_validate_addr,
3204 .ndo_set_mac_address = i40evf_set_mac,
3205 .ndo_change_mtu = i40evf_change_mtu,
3206 .ndo_tx_timeout = i40evf_tx_timeout,
3207 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
3208 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
3209 .ndo_features_check = i40evf_features_check,
3210 .ndo_fix_features = i40evf_fix_features,
3211 .ndo_set_features = i40evf_set_features,
3212 .ndo_setup_tc = i40evf_setup_tc,
3216 * i40evf_check_reset_complete - check that VF reset is complete
3217 * @hw: pointer to hw struct
3219 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3221 static int i40evf_check_reset_complete(struct i40e_hw *hw)
3226 for (i = 0; i < 100; i++) {
3227 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
3228 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
3229 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3230 (rstat == VIRTCHNL_VFR_COMPLETED))
3232 usleep_range(10, 20);
3238 * i40evf_process_config - Process the config information we got from the PF
3239 * @adapter: board private structure
3241 * Verify that we have a valid config struct, and set up our netdev features
3242 * and our VSI struct.
3244 int i40evf_process_config(struct i40evf_adapter *adapter)
3246 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3247 int i, num_req_queues = adapter->num_req_queues;
3248 struct net_device *netdev = adapter->netdev;
3249 struct i40e_vsi *vsi = &adapter->vsi;
3250 netdev_features_t hw_enc_features;
3251 netdev_features_t hw_features;
3253 /* got VF config message back from PF, now we can parse it */
3254 for (i = 0; i < vfres->num_vsis; i++) {
3255 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3256 adapter->vsi_res = &vfres->vsi_res[i];
3258 if (!adapter->vsi_res) {
3259 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3263 if (num_req_queues &&
3264 num_req_queues != adapter->vsi_res->num_queue_pairs) {
3265 /* Problem. The PF gave us fewer queues than what we had
3266 * negotiated in our request. Need a reset to see if we can
3267 * get back to a working state.
3269 dev_err(&adapter->pdev->dev,
3270 "Requested %d queues, but PF only gave us %d.\n",
3272 adapter->vsi_res->num_queue_pairs);
3273 adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
3274 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3275 i40evf_schedule_reset(adapter);
3278 adapter->num_req_queues = 0;
3280 hw_enc_features = NETIF_F_SG |
3284 NETIF_F_SOFT_FEATURES |
3293 /* advertise to stack only if offloads for encapsulated packets is
3296 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3297 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
3299 NETIF_F_GSO_GRE_CSUM |
3300 NETIF_F_GSO_IPXIP4 |
3301 NETIF_F_GSO_IPXIP6 |
3302 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3303 NETIF_F_GSO_PARTIAL |
3306 if (!(vfres->vf_cap_flags &
3307 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3308 netdev->gso_partial_features |=
3309 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3311 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3312 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3313 netdev->hw_enc_features |= hw_enc_features;
3315 /* record features VLANs can make use of */
3316 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3318 /* Write features and hw_features separately to avoid polluting
3319 * with, or dropping, features that are set when we registered.
3321 hw_features = hw_enc_features;
3323 /* Enable VLAN features if supported */
3324 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3325 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3326 NETIF_F_HW_VLAN_CTAG_RX);
3327 /* Enable cloud filter if ADQ is supported */
3328 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3329 hw_features |= NETIF_F_HW_TC;
3331 netdev->hw_features |= hw_features;
3333 netdev->features |= hw_features;
3335 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3336 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3338 netdev->priv_flags |= IFF_UNICAST_FLT;
3340 /* Do not turn on offloads when they are requested to be turned off.
3341 * TSO needs minimum 576 bytes to work correctly.
3343 if (netdev->wanted_features) {
3344 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3346 netdev->features &= ~NETIF_F_TSO;
3347 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3349 netdev->features &= ~NETIF_F_TSO6;
3350 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3351 netdev->features &= ~NETIF_F_TSO_ECN;
3352 if (!(netdev->wanted_features & NETIF_F_GRO))
3353 netdev->features &= ~NETIF_F_GRO;
3354 if (!(netdev->wanted_features & NETIF_F_GSO))
3355 netdev->features &= ~NETIF_F_GSO;
3358 adapter->vsi.id = adapter->vsi_res->vsi_id;
3360 adapter->vsi.back = adapter;
3361 adapter->vsi.base_vector = 1;
3362 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
3363 vsi->netdev = adapter->netdev;
3364 vsi->qs_handle = adapter->vsi_res->qset_handle;
3365 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3366 adapter->rss_key_size = vfres->rss_key_size;
3367 adapter->rss_lut_size = vfres->rss_lut_size;
3369 adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
3370 adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
3377 * i40evf_init_task - worker thread to perform delayed initialization
3378 * @work: pointer to work_struct containing our data
3380 * This task completes the work that was begun in probe. Due to the nature
3381 * of VF-PF communications, we may need to wait tens of milliseconds to get
3382 * responses back from the PF. Rather than busy-wait in probe and bog down the
3383 * whole system, we'll do it in a task so we can sleep.
3384 * This task only runs during driver init. Once we've established
3385 * communications with the PF driver and set up our netdev, the watchdog takes over.
3388 static void i40evf_init_task(struct work_struct *work)
3390 struct i40evf_adapter *adapter = container_of(work,
3391 struct i40evf_adapter,
3393 struct net_device *netdev = adapter->netdev;
3394 struct i40e_hw *hw = &adapter->hw;
3395 struct pci_dev *pdev = adapter->pdev;
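/* Each invocation of this task advances the init state machine by one
* step; on transient failures the work is simply rescheduled below.
*/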
3398 switch (adapter->state) {
3399 case __I40EVF_STARTUP:
3400 /* driver loaded, probe complete */
3401 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
3402 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
3403 err = i40e_set_mac_type(hw);
3405 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
3409 err = i40evf_check_reset_complete(hw);
3411 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
3415 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
3416 hw->aq.num_asq_entries = I40EVF_AQ_LEN;
3417 hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
3418 hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
3420 err = i40evf_init_adminq(hw);
3422 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
3426 err = i40evf_send_api_ver(adapter);
3428 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
3429 i40evf_shutdown_adminq(hw);
3432 adapter->state = __I40EVF_INIT_VERSION_CHECK;
3434 case __I40EVF_INIT_VERSION_CHECK:
3435 if (!i40evf_asq_done(hw)) {
3436 dev_err(&pdev->dev, "Admin queue command never completed\n");
3437 i40evf_shutdown_adminq(hw);
3438 adapter->state = __I40EVF_STARTUP;
3442 /* aq msg sent, awaiting reply */
3443 err = i40evf_verify_api_ver(adapter);
3445 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
3446 err = i40evf_send_api_ver(adapter);
3448 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
3449 adapter->pf_version.major,
3450 adapter->pf_version.minor,
3451 VIRTCHNL_VERSION_MAJOR,
3452 VIRTCHNL_VERSION_MINOR);
3455 err = i40evf_send_vf_config_msg(adapter);
3457 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
3461 adapter->state = __I40EVF_INIT_GET_RESOURCES;
3463 case __I40EVF_INIT_GET_RESOURCES:
3464 /* aq msg sent, awaiting reply */
3465 if (!adapter->vf_res) {
3466 bufsz = sizeof(struct virtchnl_vf_resource) +
3468 sizeof(struct virtchnl_vsi_resource));
3469 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
3470 if (!adapter->vf_res)
3473 err = i40evf_get_vf_config(adapter);
3474 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
3475 err = i40evf_send_vf_config_msg(adapter);
3477 } else if (err == I40E_ERR_PARAM) {
3478 /* We only get ERR_PARAM if the device is in a very bad
3479 * state or if we've been disabled for previous bad
3480 * behavior. Either way, we're done now.
3482 i40evf_shutdown_adminq(hw);
3483 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
3487 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
3491 adapter->state = __I40EVF_INIT_SW;
3497 if (i40evf_process_config(adapter))
3499 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3501 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
3503 netdev->netdev_ops = &i40evf_netdev_ops;
3504 i40evf_set_ethtool_ops(netdev);
3505 netdev->watchdog_timeo = 5 * HZ;
3507 /* MTU range: 68 - 9710 */
3508 netdev->min_mtu = ETH_MIN_MTU;
3509 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
3511 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
3512 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
3513 adapter->hw.mac.addr);
3514 eth_hw_addr_random(netdev);
3515 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
3517 adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
3518 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
3519 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
3522 timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
3523 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3525 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
3526 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
3527 err = i40evf_init_interrupt_scheme(adapter);
3530 i40evf_map_rings_to_vectors(adapter);
3531 if (adapter->vf_res->vf_cap_flags &
3532 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
3533 adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
3535 err = i40evf_request_misc_irq(adapter);
3539 netif_carrier_off(netdev);
3540 adapter->link_up = false;
3542 if (!adapter->netdev_registered) {
3543 err = register_netdev(netdev);
3548 adapter->netdev_registered = true;
3550 netif_tx_stop_all_queues(netdev);
3551 if (CLIENT_ALLOWED(adapter)) {
3552 err = i40evf_lan_add_device(adapter);
3554 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
3558 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
3559 if (netdev->features & NETIF_F_GRO)
3560 dev_info(&pdev->dev, "GRO is enabled\n");
3562 adapter->state = __I40EVF_DOWN;
3563 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
3564 i40evf_misc_irq_enable(adapter);
3565 wake_up(&adapter->down_waitqueue);
3567 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
3568 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
3569 if (!adapter->rss_key || !adapter->rss_lut)
3572 if (RSS_AQ(adapter)) {
3573 adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
3574 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
3576 i40evf_init_rss(adapter);
3580 schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
3583 i40evf_free_rss(adapter);
3585 i40evf_free_misc_irq(adapter);
3587 i40evf_reset_interrupt_capability(adapter);
3589 kfree(adapter->vf_res);
3590 adapter->vf_res = NULL;
3592 /* Things went into the weeds, so try again later */
3593 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
3594 dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
3595 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
3596 i40evf_shutdown_adminq(hw);
3597 adapter->state = __I40EVF_STARTUP;
3598 schedule_delayed_work(&adapter->init_task, HZ * 5);
3601 schedule_delayed_work(&adapter->init_task, HZ);
3605 * i40evf_shutdown - Shutdown the device in preparation for a reboot
3606 * @pdev: pci device structure
3608 static void i40evf_shutdown(struct pci_dev *pdev)
3610 struct net_device *netdev = pci_get_drvdata(pdev);
3611 struct i40evf_adapter *adapter = netdev_priv(netdev);
3613 netif_device_detach(netdev);
3615 if (netif_running(netdev))
3616 i40evf_close(netdev);
3618 /* Prevent the watchdog from running. */
3619 adapter->state = __I40EVF_REMOVE;
3620 adapter->aq_required = 0;
3623 pci_save_state(pdev);
3626 pci_disable_device(pdev);
3630 * i40evf_probe - Device Initialization Routine
3631 * @pdev: PCI device information struct
3632 * @ent: entry in i40evf_pci_tbl
3634 * Returns 0 on success, negative on failure
3636 * i40evf_probe initializes an adapter identified by a pci_dev structure.
3637 * The OS initialization, configuring of the adapter private structure,
3638 * and a hardware reset occur.
3640 static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3642 struct net_device *netdev;
3643 struct i40evf_adapter *adapter = NULL;
3644 struct i40e_hw *hw = NULL;
3647 err = pci_enable_device(pdev);
3651 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3653 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3656 "DMA configuration failed: 0x%x\n", err);
3661 err = pci_request_regions(pdev, i40evf_driver_name);
3664 "pci_request_regions failed 0x%x\n", err);
3668 pci_enable_pcie_error_reporting(pdev);
3670 pci_set_master(pdev);
3672 netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
3673 I40EVF_MAX_REQ_QUEUES);
3676 goto err_alloc_etherdev;
3679 SET_NETDEV_DEV(netdev, &pdev->dev);
3681 pci_set_drvdata(pdev, netdev);
3682 adapter = netdev_priv(netdev);
3684 adapter->netdev = netdev;
3685 adapter->pdev = pdev;
3690 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3691 adapter->state = __I40EVF_STARTUP;
3693 /* Call save state here because it relies on the adapter struct. */
3694 pci_save_state(pdev);
3696 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3697 pci_resource_len(pdev, 0));
3702 hw->vendor_id = pdev->vendor;
3703 hw->device_id = pdev->device;
3704 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3705 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3706 hw->subsystem_device_id = pdev->subsystem_device;
3707 hw->bus.device = PCI_SLOT(pdev->devfn);
3708 hw->bus.func = PCI_FUNC(pdev->devfn);
3709 hw->bus.bus_id = pdev->bus->number;
3711 /* set up the locks for the AQ, do this only once in probe
3712 * and destroy them only once in remove
3714 mutex_init(&hw->aq.asq_mutex);
3715 mutex_init(&hw->aq.arq_mutex);
3717 spin_lock_init(&adapter->mac_vlan_list_lock);
3718 spin_lock_init(&adapter->cloud_filter_list_lock);
3720 INIT_LIST_HEAD(&adapter->mac_filter_list);
3721 INIT_LIST_HEAD(&adapter->vlan_filter_list);
3722 INIT_LIST_HEAD(&adapter->cloud_filter_list);
3724 INIT_WORK(&adapter->reset_task, i40evf_reset_task);
3725 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
3726 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
3727 INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
3728 INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
3729 schedule_delayed_work(&adapter->init_task,
3730 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
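/* Init is staggered by PCI function number so that many VFs on the
* same device do not hit the PF's admin queue all at once.
*/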
3732 /* Setup the wait queue for indicating transition to down status */
3733 init_waitqueue_head(&adapter->down_waitqueue);
3738 free_netdev(netdev);
3740 pci_disable_pcie_error_reporting(pdev);
3741 pci_release_regions(pdev);
3744 pci_disable_device(pdev);
3750 * i40evf_suspend - Power management suspend routine
3751 * @pdev: PCI device information struct
3754 * Called when the system (VM) is entering sleep/suspend.
3756 static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
3758 struct net_device *netdev = pci_get_drvdata(pdev);
3759 struct i40evf_adapter *adapter = netdev_priv(netdev);
3762 netif_device_detach(netdev);
3764 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
3765 &adapter->crit_section))
3766 usleep_range(500, 1000);
3768 if (netif_running(netdev)) {
3770 i40evf_down(adapter);
3773 i40evf_free_misc_irq(adapter);
3774 i40evf_reset_interrupt_capability(adapter);
3776 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
3778 retval = pci_save_state(pdev);
3782 pci_disable_device(pdev);
3788 * i40evf_resume - Power management resume routine
3789 * @pdev: PCI device information struct
3791 * Called when the system (VM) is resumed from sleep/suspend.
3793 static int i40evf_resume(struct pci_dev *pdev)
3795 struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
3796 struct net_device *netdev = adapter->netdev;
3799 pci_set_power_state(pdev, PCI_D0);
3800 pci_restore_state(pdev);
3801 /* pci_restore_state clears dev->state_saved so call
3802 * pci_save_state to restore it.
3804 pci_save_state(pdev);
3806 err = pci_enable_device_mem(pdev);
3808 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3811 pci_set_master(pdev);
3814 err = i40evf_set_interrupt_capability(adapter);
3817 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3820 err = i40evf_request_misc_irq(adapter);
3823 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3827 schedule_work(&adapter->reset_task);
3829 netif_device_attach(netdev);
3834 #endif /* CONFIG_PM */
3836 * i40evf_remove - Device Removal Routine
3837 * @pdev: PCI device information struct
3839 * i40evf_remove is called by the PCI subsystem to alert the driver
3840 * that it should release a PCI device. This could be caused by a
3841 * Hot-Plug event, or because the driver is going to be removed from memory.
3844 static void i40evf_remove(struct pci_dev *pdev)
3846 struct net_device *netdev = pci_get_drvdata(pdev);
3847 struct i40evf_adapter *adapter = netdev_priv(netdev);
3848 struct i40evf_vlan_filter *vlf, *vlftmp;
3849 struct i40evf_mac_filter *f, *ftmp;
3850 struct i40evf_cloud_filter *cf, *cftmp;
3851 struct i40e_hw *hw = &adapter->hw;
3853 /* Indicate we are in remove and not to run reset_task */
3854 set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
3855 cancel_delayed_work_sync(&adapter->init_task);
3856 cancel_work_sync(&adapter->reset_task);
3857 cancel_delayed_work_sync(&adapter->client_task);
3858 if (adapter->netdev_registered) {
3859 unregister_netdev(netdev);
3860 adapter->netdev_registered = false;
3862 if (CLIENT_ALLOWED(adapter)) {
3863 err = i40evf_lan_del_device(adapter);
3865 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3869 /* Shut down all the garbage mashers on the detention level */
3870 adapter->state = __I40EVF_REMOVE;
3871 adapter->aq_required = 0;
3872 adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
3873 i40evf_request_reset(adapter);
3875 /* If the FW isn't responding, kick it once, but only once. */
3876 if (!i40evf_asq_done(hw)) {
3877 i40evf_request_reset(adapter);
3880 i40evf_free_all_tx_resources(adapter);
3881 i40evf_free_all_rx_resources(adapter);
3882 i40evf_misc_irq_disable(adapter);
3883 i40evf_free_misc_irq(adapter);
3884 i40evf_reset_interrupt_capability(adapter);
3885 i40evf_free_q_vectors(adapter);
3887 if (adapter->watchdog_timer.function)
3888 del_timer_sync(&adapter->watchdog_timer);
3890 cancel_work_sync(&adapter->adminq_task);
3892 i40evf_free_rss(adapter);
3894 if (hw->aq.asq.count)
3895 i40evf_shutdown_adminq(hw);
3897 /* destroy the locks only once, here */
3898 mutex_destroy(&hw->aq.arq_mutex);
3899 mutex_destroy(&hw->aq.asq_mutex);
3901 iounmap(hw->hw_addr);
3902 pci_release_regions(pdev);
3903 i40evf_free_all_tx_resources(adapter);
3904 i40evf_free_all_rx_resources(adapter);
3905 i40evf_free_queues(adapter);
3906 kfree(adapter->vf_res);
3907 spin_lock_bh(&adapter->mac_vlan_list_lock);
3908 /* If we got removed before an up/down sequence, we've got a filter
3909 * hanging out there that we need to get rid of.
3911 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3915 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3917 list_del(&vlf->list);
3921 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3923 spin_lock_bh(&adapter->cloud_filter_list_lock);
3924 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3925 list_del(&cf->list);
3928 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3930 free_netdev(netdev);
3932 pci_disable_pcie_error_reporting(pdev);
3934 pci_disable_device(pdev);
3937 static struct pci_driver i40evf_driver = {
3938 .name = i40evf_driver_name,
3939 .id_table = i40evf_pci_tbl,
3940 .probe = i40evf_probe,
3941 .remove = i40evf_remove,
3943 .suspend = i40evf_suspend,
3944 .resume = i40evf_resume,
3946 .shutdown = i40evf_shutdown,
3950 * i40evf_init_module - Driver Registration Routine
3952 * i40evf_init_module is the first routine called when the driver is
3953 * loaded. All it does is register with the PCI subsystem.
3955 static int __init i40evf_init_module(void)
3959 pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
3960 i40evf_driver_version);
3962 pr_info("%s\n", i40evf_copyright);
3964 i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3965 i40evf_driver_name);
3967 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
3970 ret = pci_register_driver(&i40evf_driver);
3974 module_init(i40evf_init_module);
3977 * i40evf_exit_module - Driver Exit Cleanup Routine
3979 * i40evf_exit_module is called just before the driver is removed from memory.
3982 static void __exit i40evf_exit_module(void)
3984 pci_unregister_driver(&i40evf_driver);
3985 destroy_workqueue(i40evf_wq);
3988 module_exit(i40evf_exit_module);