/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "/*(DEBLOBBED)*/"
#define FW5_FNAME "/*(DEBLOBBED)*/"
#define FW6_FNAME "/*(DEBLOBBED)*/"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "/*(DEBLOBBED)*/"
#define PHY_BCM84834_FIRMWARE "/*(DEBLOBBED)*/"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
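
/* Illustrative usage (not part of the driver itself): the parameter can be
 * set at load time, e.g. "modprobe cxgb4 msi=1" to rule out MSI-X, and since
 * it is registered with mode 0644 its current value is also visible at
 * /sys/module/cxgb4/parameters/msi. It is consulted when interrupts are set
 * up, so changing it only affects subsequently initialized adapters.
 */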
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries. This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary. And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA. However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		/* ... cases mapping each supported link speed to its
		 * string, e.g. 100 -> "100Mbps", ..., 100000 -> "100Gbps" ...
		 */
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			s = "unknown";
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}
int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
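
/* Worked example (illustrative, not driver code): hash_mac_addr() reduces
 * each address to a bit position 0..63, so a list whose two entries hash to
 * 3 and 17 programs vec = (1ULL << 3) | (1ULL << 17). The hardware then
 * accepts any frame whose destination MAC hashes to a set bit, which makes
 * this an imperfect (superset) filter; exact matching is done with the MPS
 * TCAM entries allocated via t4_alloc_mac_filt() below.
 */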
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/*
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);
	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;						/* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
				   : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
				        & FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed. We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
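
/* Worked example (illustrative): with pi->nqsets = 4 and pi->rss_size = 8,
 * the default table built above is {0, 1, 2, 3, 0, 1, 2, 3} - RSS hash
 * buckets are spread round-robin over the port's queue sets, and
 * cxgb4_write_rss() then translates each entry into an absolute
 * response-queue ID.
 */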
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count. At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
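
/* Usage sketch (illustrative; this mirrors what an ethtool coalescing path
 * would do): hold off interrupts until ~5us have passed or 8 packets have
 * arrived, whichever comes first:
 *
 *	err = cxgb4_set_rspq_intr_params(&adap->sge.ethrxq[0].rspq, 5, 8);
 *
 * Passing us == 0 and cnt == 0 would leave the queue with no interrupt
 * source at all, so the function falls back to a packet count of 1.
 */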
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
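
/* Worked example (illustrative): an IPv4 server consumes a single stid cell,
 * while the IPv6 path above uses bitmap_find_free_region(..., 1), i.e. an
 * order-1 (two-cell, naturally aligned) region. With cells {0 busy, 1 free,
 * 2 free, 3 free}, an IPv6 allocation returns cell 2 (the aligned pair 2-3),
 * never the unaligned pair 1-2, and the usage counters are bumped by 2.
 */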
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR. Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
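
/* Illustrative sketch of the pointer tagging used above (not driver code):
 * tid_tab entries are pointer-sized and pointer-aligned, so the low two bits
 * of an entry's address are always zero and can carry the Tx channel:
 *
 *	head = (void **)((uintptr_t)p | chan);	// encode, chan in 0..3
 *	chan = (uintptr_t)head & 3;		// decode the channel
 *	p    = (void *)head - chan;		// recover the pointer
 *
 * process_tid_release_list() below performs exactly this decode.
 */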
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW. If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables. Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
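
/* Worked example (illustrative, hypothetical table values): given
 * mtus[] = { 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ... } and
 * mtu = 1400, the scan stops at 1280, the largest entry not exceeding 1400,
 * and *idx is set to 6. A target smaller than mtus[0] selects index 0.
 */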
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment. If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table. Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back. Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
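
/* Worked example (illustrative, hypothetical values): with header_size = 40
 * (IPv4 + TCP) and data_size_align = 512, a table entry of 1064 yields an
 * aligned data segment of 1024 while 1500 yields 1460 (unaligned). If 1064
 * immediately precedes 1500, the best plain entry, the two indices differ by
 * one, so the function returns 1064 rather than 1500.
 */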
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
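
/* Example (illustrative): viid 0x85 gives (0x85 & 0x7f) = 5, so the SMT
 * index is 5 << 1 = 10 on T4/T5 (two SMAC entries per row) but simply 5 on
 * T6 (one entry per row).
 */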
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
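
/* Worked example (illustrative): for a 1024-entry queue with hw_pidx = 1020
 * and a software pidx of 4, the producer index has wrapped, so the doorbell
 * increment is size - hw_pidx + pidx = 1024 - 1020 + 4 = 8 descriptors.
 */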
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
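
/* Illustrative walk through the layout above: with edc0_size = 256MB and
 * edc1_size = 256MB, an offset of 272MB is past edc0_end (256MB) but below
 * edc1_end (512MB), so it resolves to MEM_EDC1 at address
 * offset - edc0_end = 16MB.
 */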
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}
static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	return err;

 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
 rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);

	adapter->flags &= ~FULL_INIT_DONE;
}
2308 * net_device operations
2310 static int cxgb_open(struct net_device *dev)
2313 struct port_info *pi = netdev_priv(dev);
2314 struct adapter *adapter = pi->adapter;
2316 netif_carrier_off(dev);
2318 if (!(adapter->flags & FULL_INIT_DONE)) {
2319 err = cxgb_up(adapter);
2324 /* It's possible that the basic port information could have
2325 * changed since we first read it.
2327 err = t4_update_port_info(pi);
2331 err = link_start(dev);
2333 netif_tx_start_all_queues(dev);
2337 static int cxgb_close(struct net_device *dev)
2339 struct port_info *pi = netdev_priv(dev);
2340 struct adapter *adapter = pi->adapter;
2343 netif_tx_stop_all_queues(dev);
2344 netif_carrier_off(dev);
2345 ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2346 false, false, false);
2347 #ifdef CONFIG_CHELSIO_T4_DCB
2348 cxgb4_dcb_reset(dev);
2349 dcb_tx_queue_prio_enable(dev, false);
2354 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2355 __be32 sip, __be16 sport, __be16 vlan,
2356 unsigned int queue, unsigned char port, unsigned char mask)
2359 struct filter_entry *f;
2360 struct adapter *adap;
2364 adap = netdev2adap(dev);
2366 /* Adjust stid to correct filter index */
2367 stid -= adap->tids.sftid_base;
2368 stid += adap->tids.nftids;
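/* A hypothetical example of the remapping above: with
 * sftid_base = 1000 and nftids = 500, server stid 1010 maps to
 * ftid_tab[(1010 - 1000) + 500] = ftid_tab[510], i.e. server
 * filters are indexed directly behind the regular filter range.
 */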
2370 /* Check to make sure the filter requested is writable ...
2372 f = &adap->tids.ftid_tab[stid];
2373 ret = writable_filter(f);
2377 /* Clear out any old resources being used by the filter before
2378 * we start constructing the new filter.
2381 clear_filter(adap, f);
2383 /* Clear out filter specifications */
2384 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2385 f->fs.val.lport = cpu_to_be16(sport);
2386 f->fs.mask.lport = ~0;
2387 val = (u8 *)&sip;
2388 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2389 for (i = 0; i < 4; i++) {
2390 f->fs.val.lip[i] = val[i];
2391 f->fs.mask.lip[i] = ~0;
2393 if (adap->params.tp.vlan_pri_map & PORT_F) {
2394 f->fs.val.iport = port;
2395 f->fs.mask.iport = mask;
2399 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2400 f->fs.val.proto = IPPROTO_TCP;
2401 f->fs.mask.proto = ~0;
2406 /* Mark filter as locked */
2410 /* Save the actual tid. We need this to get the corresponding
2411 * filter entry structure in filter_rpl.
2413 f->tid = stid + adap->tids.ftid_base;
2414 ret = set_filter_wr(adap, stid);
2416 clear_filter(adap, f);
2422 EXPORT_SYMBOL(cxgb4_create_server_filter);
2424 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2425 unsigned int queue, bool ipv6)
2427 struct filter_entry *f;
2428 struct adapter *adap;
2430 adap = netdev2adap(dev);
2432 /* Adjust stid to correct filter index */
2433 stid -= adap->tids.sftid_base;
2434 stid += adap->tids.nftids;
2436 f = &adap->tids.ftid_tab[stid];
2437 /* Unlock the filter */
2440 return delete_filter(adap, stid);
2442 EXPORT_SYMBOL(cxgb4_remove_server_filter);
2444 static void cxgb_get_stats(struct net_device *dev,
2445 struct rtnl_link_stats64 *ns)
2447 struct port_stats stats;
2448 struct port_info *p = netdev_priv(dev);
2449 struct adapter *adapter = p->adapter;
2451 /* Block retrieving statistics during EEH error
2452 * recovery. Otherwise, the recovery might fail
2453 * and the PCI device will be removed permanently
2455 spin_lock(&adapter->stats_lock);
2456 if (!netif_device_present(dev)) {
2457 spin_unlock(&adapter->stats_lock);
2460 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2462 spin_unlock(&adapter->stats_lock);
2464 ns->tx_bytes = stats.tx_octets;
2465 ns->tx_packets = stats.tx_frames;
2466 ns->rx_bytes = stats.rx_octets;
2467 ns->rx_packets = stats.rx_frames;
2468 ns->multicast = stats.rx_mcast_frames;
2470 /* detailed rx_errors */
2471 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2473 ns->rx_over_errors = 0;
2474 ns->rx_crc_errors = stats.rx_fcs_err;
2475 ns->rx_frame_errors = stats.rx_symbol_err;
2476 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
2477 stats.rx_ovflow2 + stats.rx_ovflow3 +
2478 stats.rx_trunc0 + stats.rx_trunc1 +
2479 stats.rx_trunc2 + stats.rx_trunc3;
2480 ns->rx_missed_errors = 0;
2482 /* detailed tx_errors */
2483 ns->tx_aborted_errors = 0;
2484 ns->tx_carrier_errors = 0;
2485 ns->tx_fifo_errors = 0;
2486 ns->tx_heartbeat_errors = 0;
2487 ns->tx_window_errors = 0;
2489 ns->tx_errors = stats.tx_error_frames;
2490 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2491 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2494 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2497 int ret = 0, prtad, devad;
2498 struct port_info *pi = netdev_priv(dev);
2499 struct adapter *adapter = pi->adapter;
2500 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2504 if (pi->mdio_addr < 0)
2506 data->phy_id = pi->mdio_addr;
2510 if (mdio_phy_id_is_c45(data->phy_id)) {
2511 prtad = mdio_phy_id_prtad(data->phy_id);
2512 devad = mdio_phy_id_devad(data->phy_id);
2513 } else if (data->phy_id < 32) {
2514 prtad = data->phy_id;
2515 devad = 0;
2516 data->reg_num &= 0x1f;
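/* Sketch of the phy_id layout assumed here (per linux/mdio.h): a
 * Clause 45 id is MDIO_PHY_ID_C45 | (prtad << 5) | devad, so e.g.
 * 0x8000 | (5 << 5) | 1 = 0x80a1 decodes to prtad 5, devad 1, while
 * a Clause 22 id is just the 0-31 port address with reg_num masked
 * to 5 bits above.
 */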
2520 mbox = pi->adapter->pf;
2521 if (cmd == SIOCGMIIREG)
2522 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2523 data->reg_num, &data->val_out);
2525 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2526 data->reg_num, data->val_in);
2529 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2530 sizeof(pi->tstamp_config)) ?
2533 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
2534 sizeof(pi->tstamp_config)))
2537 if (!is_t4(adapter->params.chip)) {
2538 switch (pi->tstamp_config.tx_type) {
2539 case HWTSTAMP_TX_OFF:
2540 case HWTSTAMP_TX_ON:
2546 switch (pi->tstamp_config.rx_filter) {
2547 case HWTSTAMP_FILTER_NONE:
2548 pi->rxtstamp = false;
2550 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2551 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2552 cxgb4_ptprx_timestamping(pi, pi->port_id,
2555 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2556 cxgb4_ptprx_timestamping(pi, pi->port_id,
2559 case HWTSTAMP_FILTER_ALL:
2560 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2561 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2562 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2563 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2564 pi->rxtstamp = true;
2567 pi->tstamp_config.rx_filter =
2568 HWTSTAMP_FILTER_NONE;
2572 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
2573 (pi->tstamp_config.rx_filter ==
2574 HWTSTAMP_FILTER_NONE)) {
2575 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
2576 pi->ptp_enable = false;
2579 if (pi->tstamp_config.rx_filter !=
2580 HWTSTAMP_FILTER_NONE) {
2581 if (cxgb4_ptp_redirect_rx_packet(adapter,
2583 pi->ptp_enable = true;
2586 /* For T4 Adapters */
2587 switch (pi->tstamp_config.rx_filter) {
2588 case HWTSTAMP_FILTER_NONE:
2589 pi->rxtstamp = false;
2591 case HWTSTAMP_FILTER_ALL:
2592 pi->rxtstamp = true;
2595 pi->tstamp_config.rx_filter =
2596 HWTSTAMP_FILTER_NONE;
2600 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2601 sizeof(pi->tstamp_config)) ?
2609 static void cxgb_set_rxmode(struct net_device *dev)
2611 /* unfortunately we can't return errors to the stack */
2612 set_rxmode(dev, -1, false);
2615 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2618 struct port_info *pi = netdev_priv(dev);
2620 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2627 #ifdef CONFIG_PCI_IOV
2628 static int cxgb4_mgmt_open(struct net_device *dev)
2630 /* Turn carrier off since we don't have to transmit anything on this
2631 * interface.
2633 netif_carrier_off(dev);
2637 /* Fill MAC address that will be assigned by the FW */
2638 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
2640 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
2641 unsigned int i, vf, nvfs;
2646 adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
2648 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
2652 na = adap->params.vpd.na;
2653 for (i = 0; i < ETH_ALEN; i++)
2654 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
2655 hex2val(na[2 * i + 1]));
2657 a = (hw_addr[0] << 8) | hw_addr[1];
2658 b = (hw_addr[1] << 8) | hw_addr[2];
2659 a ^= b; /* mix the two overlapping 16-bit windows; b is unused otherwise */
2660 a |= 0x0200; /* locally assigned Ethernet MAC address */
2661 a &= ~0x0100; /* not a multicast Ethernet MAC address */
2662 macaddr[0] = a >> 8;
2663 macaddr[1] = a & 0xff;
2665 for (i = 2; i < 5; i++)
2666 macaddr[i] = hw_addr[i + 1];
2668 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
2670 macaddr[5] = adap->pf * 16 + vf;
2671 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
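/* Illustrative walk-through (VPD value hypothetical): for an "na" of
 * "000743123456", hw_addr begins 00:07:43, so a = 0x0007 ^ 0x0743 =
 * 0x0744; setting 0x0200 and clearing 0x0100 leaves 0x0644, so the
 * derived address starts 06:44 with the locally-administered bit
 * (0x02) set and the multicast bit (0x01) clear. The final octet is
 * adap->pf * 16 + vf, e.g. PF 4 gives VFs 0-15 the octets 0x40-0x4f.
 */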
2675 static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2677 struct port_info *pi = netdev_priv(dev);
2678 struct adapter *adap = pi->adapter;
2681 /* verify MAC addr is valid */
2682 if (!is_valid_ether_addr(mac)) {
2683 dev_err(pi->adapter->pdev_dev,
2684 "Invalid Ethernet address %pM for VF %d\n",
2689 dev_info(pi->adapter->pdev_dev,
2690 "Setting MAC %pM on VF %d\n", mac, vf);
2691 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
2693 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
2697 static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
2698 int vf, struct ifla_vf_info *ivi)
2700 struct port_info *pi = netdev_priv(dev);
2701 struct adapter *adap = pi->adapter;
2702 struct vf_info *vfinfo;
2704 if (vf >= adap->num_vfs)
2706 vfinfo = &adap->vfinfo[vf];
2709 ivi->max_tx_rate = vfinfo->tx_rate;
2710 ivi->min_tx_rate = 0;
2711 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
2712 ivi->vlan = vfinfo->vlan;
2716 static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
2717 struct netdev_phys_item_id *ppid)
2719 struct port_info *pi = netdev_priv(dev);
2720 unsigned int phy_port_id;
2722 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
2723 ppid->id_len = sizeof(phy_port_id);
2724 memcpy(ppid->id, &phy_port_id, ppid->id_len);
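/* By this formula, e.g. adap_idx 2 and port_id 1 yield a physical
 * port id of 21, keeping ids unique across adapters on the host.
 */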
2728 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
2729 int min_tx_rate, int max_tx_rate)
2731 struct port_info *pi = netdev_priv(dev);
2732 struct adapter *adap = pi->adapter;
2733 unsigned int link_ok, speed, mtu;
2734 u32 fw_pfvf, fw_class;
2739 if (vf >= adap->num_vfs)
2743 dev_err(adap->pdev_dev,
2744 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
2749 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
2750 if (ret != FW_SUCCESS) {
2751 dev_err(adap->pdev_dev,
2752 "Failed to get link information for VF %d\n", vf);
2757 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
2761 if (max_tx_rate > speed) {
2762 dev_err(adap->pdev_dev,
2763 "Max tx rate %d for VF %d can't be > link-speed %u",
2764 max_tx_rate, vf, speed);
2769 /* subtract the Ethernet header size and 4-byte CRC, since the f/w appends them */
2770 pktsize = pktsize - sizeof(struct ethhdr) - 4;
2771 /* subtract the IPv4 and TCP header sizes to get a typical IPv4 MSS */
2772 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
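/* Worked example of the arithmetic above: for a 1500-byte MTU,
 * pktsize becomes 1500 - 14 - 4 = 1482, then
 * 1482 - 20 - 20 = 1442 bytes.
 */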
2773 /* configure Traffic Class for rate-limiting */
2774 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
2775 SCHED_CLASS_LEVEL_CL_RL,
2776 SCHED_CLASS_MODE_CLASS,
2777 SCHED_CLASS_RATEUNIT_BITS,
2778 SCHED_CLASS_RATEMODE_ABS,
2779 pi->tx_chan, class_id, 0,
2780 max_tx_rate * 1000, 0, pktsize);
2782 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
2786 dev_info(adap->pdev_dev,
2787 "Class %d with MSS %u configured with rate %u\n",
2788 class_id, pktsize, max_tx_rate);
2790 /* bind VF to configured Traffic Class */
2791 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2792 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2793 fw_class = class_id;
2794 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
2797 dev_err(adap->pdev_dev,
2798 "Err %d in binding VF %d to Traffic Class %d\n",
2802 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
2803 adap->pf, vf, class_id);
2804 adap->vfinfo[vf].tx_rate = max_tx_rate;
2808 static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
2809 u16 vlan, u8 qos, __be16 vlan_proto)
2811 struct port_info *pi = netdev_priv(dev);
2812 struct adapter *adap = pi->adapter;
2815 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
2818 if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
2819 return -EPROTONOSUPPORT;
2821 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
2823 adap->vfinfo[vf].vlan = vlan;
2827 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
2828 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
2831 #endif /* CONFIG_PCI_IOV */
2833 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2836 struct sockaddr *addr = p;
2837 struct port_info *pi = netdev_priv(dev);
2839 if (!is_valid_ether_addr(addr->sa_data))
2840 return -EADDRNOTAVAIL;
2842 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2843 pi->xact_addr_filt, addr->sa_data, true, true);
2847 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2848 pi->xact_addr_filt = ret;
2852 #ifdef CONFIG_NET_POLL_CONTROLLER
2853 static void cxgb_netpoll(struct net_device *dev)
2855 struct port_info *pi = netdev_priv(dev);
2856 struct adapter *adap = pi->adapter;
2858 if (adap->flags & USING_MSIX) {
2860 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2862 for (i = pi->nqsets; i; i--, rx++)
2863 t4_sge_intr_msix(0, &rx->rspq);
2865 t4_intr_handler(adap)(0, adap);
2869 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2871 struct port_info *pi = netdev_priv(dev);
2872 struct adapter *adap = pi->adapter;
2873 struct sched_class *e;
2874 struct ch_sched_params p;
2875 struct ch_sched_queue qe;
2879 if (!can_sched(dev))
2882 if (index < 0 || index > pi->nqsets - 1)
2885 if (!(adap->flags & FULL_INIT_DONE)) {
2886 dev_err(adap->pdev_dev,
2887 "Failed to rate limit on queue %d. Link Down?\n",
2892 /* Convert from Mbps to Kbps */
2893 req_rate = rate * 1000;
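/* E.g. a requested 40000 (Mbps) becomes req_rate = 40000000 Kbps;
 * anything above SCHED_MAX_RATE_KBPS (the 100 Gbps ceiling) is
 * rejected just below.
 */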
2895 /* Max rate is 100 Gbps */
2896 if (req_rate > SCHED_MAX_RATE_KBPS) {
2897 dev_err(adap->pdev_dev,
2898 "Invalid rate %u Mbps, Max rate is %u Mbps\n",
2899 rate, SCHED_MAX_RATE_KBPS / 1000);
2903 /* First unbind the queue from any existing class */
2904 memset(&qe, 0, sizeof(qe));
2906 qe.class = SCHED_CLS_NONE;
2908 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
2910 dev_err(adap->pdev_dev,
2911 "Unbinding Queue %d on port %d fail. Err: %d\n",
2912 index, pi->port_id, err);
2916 /* Queue already unbound */
2920 /* Fetch any available unused or matching scheduling class */
2921 memset(&p, 0, sizeof(p));
2922 p.type = SCHED_CLASS_TYPE_PACKET;
2923 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
2924 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
2925 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
2926 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
2927 p.u.params.channel = pi->tx_chan;
2928 p.u.params.class = SCHED_CLS_NONE;
2929 p.u.params.minrate = 0;
2930 p.u.params.maxrate = req_rate;
2931 p.u.params.weight = 0;
2932 p.u.params.pktsize = dev->mtu;
2934 e = cxgb4_sched_class_alloc(dev, &p);
2938 /* Bind the queue to a scheduling class */
2939 memset(&qe, 0, sizeof(qe));
2943 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
2945 dev_err(adap->pdev_dev,
2946 "Queue rate limiting failed. Err: %d\n", err);
2950 static int cxgb_setup_tc_flower(struct net_device *dev,
2951 struct tc_cls_flower_offload *cls_flower)
2953 switch (cls_flower->command) {
2954 case TC_CLSFLOWER_REPLACE:
2955 return cxgb4_tc_flower_replace(dev, cls_flower);
2956 case TC_CLSFLOWER_DESTROY:
2957 return cxgb4_tc_flower_destroy(dev, cls_flower);
2958 case TC_CLSFLOWER_STATS:
2959 return cxgb4_tc_flower_stats(dev, cls_flower);
2965 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
2966 struct tc_cls_u32_offload *cls_u32)
2968 switch (cls_u32->command) {
2969 case TC_CLSU32_NEW_KNODE:
2970 case TC_CLSU32_REPLACE_KNODE:
2971 return cxgb4_config_knode(dev, cls_u32);
2972 case TC_CLSU32_DELETE_KNODE:
2973 return cxgb4_delete_knode(dev, cls_u32);
2979 static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2982 struct net_device *dev = cb_priv;
2983 struct port_info *pi = netdev2pinfo(dev);
2984 struct adapter *adap = netdev2adap(dev);
2986 if (!(adap->flags & FULL_INIT_DONE)) {
2987 dev_err(adap->pdev_dev,
2988 "Failed to setup tc on port %d. Link Down?\n",
2993 if (!tc_cls_can_offload_and_chain0(dev, type_data))
2997 case TC_SETUP_CLSU32:
2998 return cxgb_setup_tc_cls_u32(dev, type_data);
2999 case TC_SETUP_CLSFLOWER:
3000 return cxgb_setup_tc_flower(dev, type_data);
3006 static int cxgb_setup_tc_block(struct net_device *dev,
3007 struct tc_block_offload *f)
3009 struct port_info *pi = netdev2pinfo(dev);
3011 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3014 switch (f->command) {
3016 return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
3017 pi, dev, f->extack);
3018 case TC_BLOCK_UNBIND:
3019 tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
3026 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3030 case TC_SETUP_BLOCK:
3031 return cxgb_setup_tc_block(dev, type_data);
3037 static void cxgb_del_udp_tunnel(struct net_device *netdev,
3038 struct udp_tunnel_info *ti)
3040 struct port_info *pi = netdev_priv(netdev);
3041 struct adapter *adapter = pi->adapter;
3042 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3043 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3046 if (chip_ver < CHELSIO_T6)
3050 case UDP_TUNNEL_TYPE_VXLAN:
3051 if (!adapter->vxlan_port_cnt ||
3052 adapter->vxlan_port != ti->port)
3053 return; /* Invalid VxLAN destination port */
3055 adapter->vxlan_port_cnt--;
3056 if (adapter->vxlan_port_cnt)
3059 adapter->vxlan_port = 0;
3060 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3062 case UDP_TUNNEL_TYPE_GENEVE:
3063 if (!adapter->geneve_port_cnt ||
3064 adapter->geneve_port != ti->port)
3065 return; /* Invalid GENEVE destination port */
3067 adapter->geneve_port_cnt--;
3068 if (adapter->geneve_port_cnt)
3071 adapter->geneve_port = 0;
3072 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3078 /* Matchall mac entries can be deleted only after all tunnel ports
3079 * are brought down or removed.
3081 if (!adapter->rawf_cnt)
3083 for_each_port(adapter, i) {
3084 pi = adap2pinfo(adapter, i);
3085 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3086 match_all_mac, match_all_mac,
3087 adapter->rawf_start +
3089 1, pi->port_id, false);
3091 netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
3095 atomic_dec(&adapter->mps_encap[adapter->rawf_start +
3096 pi->port_id].refcnt);
3100 static void cxgb_add_udp_tunnel(struct net_device *netdev,
3101 struct udp_tunnel_info *ti)
3103 struct port_info *pi = netdev_priv(netdev);
3104 struct adapter *adapter = pi->adapter;
3105 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
3106 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3109 if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
3113 case UDP_TUNNEL_TYPE_VXLAN:
3114 /* Callback for adding vxlan port can be called with the same
3115 * port for both IPv4 and IPv6. We should not disable the
3116 * offloading when the same port for both protocols is added
3117 * and later one of them is removed.
3119 if (adapter->vxlan_port_cnt &&
3120 adapter->vxlan_port == ti->port) {
3121 adapter->vxlan_port_cnt++;
3125 /* We will support only one VxLAN port */
3126 if (adapter->vxlan_port_cnt) {
3127 netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3128 be16_to_cpu(adapter->vxlan_port),
3129 be16_to_cpu(ti->port));
3133 adapter->vxlan_port = ti->port;
3134 adapter->vxlan_port_cnt = 1;
3136 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3137 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3139 case UDP_TUNNEL_TYPE_GENEVE:
3140 if (adapter->geneve_port_cnt &&
3141 adapter->geneve_port == ti->port) {
3142 adapter->geneve_port_cnt++;
3146 /* We will support only one GENEVE port */
3147 if (adapter->geneve_port_cnt) {
3148 netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
3149 be16_to_cpu(adapter->geneve_port),
3150 be16_to_cpu(ti->port));
3154 adapter->geneve_port = ti->port;
3155 adapter->geneve_port_cnt = 1;
3157 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3158 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3164 /* Create a 'match all' mac filter entry for inner mac,
3165 * if raw mac interface is supported. Once the linux kernel provides
3166 * driver entry points for adding/deleting the inner mac addresses,
3167 * we will remove this 'match all' entry and fall back to adding
3168 * exact match filters.
3170 for_each_port(adapter, i) {
3171 pi = adap2pinfo(adapter, i);
3173 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3176 adapter->rawf_start +
3178 1, pi->port_id, false);
3180 netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3181 be16_to_cpu(ti->port));
3182 cxgb_del_udp_tunnel(netdev, ti);
3185 atomic_inc(&adapter->mps_encap[ret].refcnt);
3189 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3190 struct net_device *dev,
3191 netdev_features_t features)
3193 struct port_info *pi = netdev_priv(dev);
3194 struct adapter *adapter = pi->adapter;
3196 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3199 /* Check if hw supports offload for this packet */
3200 if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3203 /* Offload is not supported for this encapsulated packet */
3204 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3207 static netdev_features_t cxgb_fix_features(struct net_device *dev,
3208 netdev_features_t features)
3210 /* Disable GRO if RX_CSUM is disabled */
3211 if (!(features & NETIF_F_RXCSUM))
3212 features &= ~NETIF_F_GRO;
3217 static const struct net_device_ops cxgb4_netdev_ops = {
3218 .ndo_open = cxgb_open,
3219 .ndo_stop = cxgb_close,
3220 .ndo_start_xmit = t4_start_xmit,
3221 .ndo_select_queue = cxgb_select_queue,
3222 .ndo_get_stats64 = cxgb_get_stats,
3223 .ndo_set_rx_mode = cxgb_set_rxmode,
3224 .ndo_set_mac_address = cxgb_set_mac_addr,
3225 .ndo_set_features = cxgb_set_features,
3226 .ndo_validate_addr = eth_validate_addr,
3227 .ndo_do_ioctl = cxgb_ioctl,
3228 .ndo_change_mtu = cxgb_change_mtu,
3229 #ifdef CONFIG_NET_POLL_CONTROLLER
3230 .ndo_poll_controller = cxgb_netpoll,
3232 #ifdef CONFIG_CHELSIO_T4_FCOE
3233 .ndo_fcoe_enable = cxgb_fcoe_enable,
3234 .ndo_fcoe_disable = cxgb_fcoe_disable,
3235 #endif /* CONFIG_CHELSIO_T4_FCOE */
3236 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
3237 .ndo_setup_tc = cxgb_setup_tc,
3238 .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
3239 .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
3240 .ndo_features_check = cxgb_features_check,
3241 .ndo_fix_features = cxgb_fix_features,
3244 #ifdef CONFIG_PCI_IOV
3245 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3246 .ndo_open = cxgb4_mgmt_open,
3247 .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3248 .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3249 .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3250 .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3251 .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3255 static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3256 struct ethtool_drvinfo *info)
3258 struct adapter *adapter = netdev2adap(dev);
3260 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3261 strlcpy(info->version, cxgb4_driver_version,
3262 sizeof(info->version));
3263 strlcpy(info->bus_info, pci_name(adapter->pdev),
3264 sizeof(info->bus_info));
3267 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3268 .get_drvinfo = cxgb4_mgmt_get_drvinfo,
3271 static void notify_fatal_err(struct work_struct *work)
3273 struct adapter *adap;
3275 adap = container_of(work, struct adapter, fatal_err_notify_task);
3276 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3279 void t4_fatal_err(struct adapter *adap)
3283 if (pci_channel_offline(adap->pdev))
3286 /* Disable the SGE since ULDs are going to free resources that
3287 * could be exposed to the adapter. RDMA MWs for example...
3289 t4_shutdown_adapter(adap);
3290 for_each_port(adap, port) {
3291 struct net_device *dev = adap->port[port];
3293 /* If we get here in very early initialization the network
3294 * devices may not have been set up yet.
3299 netif_tx_stop_all_queues(dev);
3300 netif_carrier_off(dev);
3302 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3303 queue_work(adap->workq, &adap->fatal_err_notify_task);
3306 static void setup_memwin(struct adapter *adap)
3308 u32 nic_win_base = t4_get_util_window(adap);
3310 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3313 static void setup_memwin_rdma(struct adapter *adap)
3315 if (adap->vres.ocq.size) {
3319 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3320 start &= PCI_BASE_ADDRESS_MEM_MASK;
3321 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3322 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3324 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3325 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3327 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3328 adap->vres.ocq.start);
3330 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3334 /* HMA Definitions */
3336 /* The maximum number of addresses that can be sent in a single FW cmd */
3337 #define HMA_MAX_ADDR_IN_CMD 5
3339 #define HMA_PAGE_SIZE PAGE_SIZE
3341 #define HMA_MAX_NO_FW_ADDRESS (16 << 10) /* FW supports 16K addresses */
3343 #define HMA_PAGE_ORDER \
3344 ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
3345 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3347 /* The minimum and maximum possible HMA sizes that can be specified in the FW
3348 * configuration (in units of MB).
3350 #define HMA_MIN_TOTAL_SIZE 1
3351 #define HMA_MAX_TOTAL_SIZE \
3352 (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
3353 HMA_MAX_NO_FW_ADDRESS) >> 20)
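/* Worked example, assuming 4 KB pages: HMA_PAGE_ORDER =
 * ilog2(16384 / 4096) = 2, so each allocation spans 16 KB and
 * HMA_MAX_TOTAL_SIZE = (16 KB * 16K addresses) >> 20 = 256 MB.
 */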
3355 static void adap_free_hma_mem(struct adapter *adapter)
3357 struct scatterlist *iter;
3361 if (!adapter->hma.sgt)
3364 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
3365 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
3366 adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
3367 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
3370 for_each_sg(adapter->hma.sgt->sgl, iter,
3371 adapter->hma.sgt->orig_nents, i) {
3372 page = sg_page(iter);
3374 __free_pages(page, HMA_PAGE_ORDER);
3377 kfree(adapter->hma.phy_addr);
3378 sg_free_table(adapter->hma.sgt);
3379 kfree(adapter->hma.sgt);
3380 adapter->hma.sgt = NULL;
3383 static int adap_config_hma(struct adapter *adapter)
3385 struct scatterlist *sgl, *iter;
3386 struct sg_table *sgt;
3387 struct page *newpage;
3388 unsigned int i, j, k;
3389 u32 param, hma_size;
3395 /* HMA is supported only for T6+ cards.
3396 * Avoid initializing HMA in kdump kernels.
3398 if (is_kdump_kernel() ||
3399 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3402 /* Get the HMA region size required by fw */
3403 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3404 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
3405 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3406 1, &param, &hma_size);
3407 /* An error means the card has its own memory or HMA is not supported by
3408 * the firmware. Return without any errors.
3410 if (ret || !hma_size)
3413 if (hma_size < HMA_MIN_TOTAL_SIZE ||
3414 hma_size > HMA_MAX_TOTAL_SIZE) {
3415 dev_err(adapter->pdev_dev,
3416 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
3417 hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
3421 page_size = HMA_PAGE_SIZE;
3422 page_order = HMA_PAGE_ORDER;
3423 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
3424 if (unlikely(!adapter->hma.sgt)) {
3425 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
3428 sgt = adapter->hma.sgt;
3429 /* FW returned value will be in MB */
3431 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
3432 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
3433 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
3434 kfree(adapter->hma.sgt);
3435 adapter->hma.sgt = NULL;
3439 sgl = adapter->hma.sgt->sgl;
3440 node = dev_to_node(adapter->pdev_dev);
3441 for_each_sg(sgl, iter, sgt->orig_nents, i) {
3442 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
3443 __GFP_ZERO, page_order);
3445 dev_err(adapter->pdev_dev,
3446 "Not enough memory for HMA page allocation\n");
3450 sg_set_page(iter, newpage, page_size << page_order, 0);
3453 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
3456 dev_err(adapter->pdev_dev,
3457 "Not enough memory for HMA DMA mapping");
3461 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
3463 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
3465 if (unlikely(!adapter->hma.phy_addr))
3468 for_each_sg(sgl, iter, sgt->nents, i) {
3469 newpage = sg_page(iter);
3470 adapter->hma.phy_addr[i] = sg_dma_address(iter);
3473 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
3474 /* Pass on the addresses to firmware */
3475 for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
3476 struct fw_hma_cmd hma_cmd;
3477 u8 naddr = HMA_MAX_ADDR_IN_CMD;
3478 u8 soc = 0, eoc = 0;
3479 u8 hma_mode = 1; /* Presently we support only Page table mode */
3481 soc = (i == 0) ? 1 : 0;
3482 eoc = (i == ncmds - 1) ? 1 : 0;
3484 /* For the last cmd, set naddr to the number of remaining
3485 * addresses in the cmd.
3487 if (i == ncmds - 1) {
3488 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
3489 naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
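/* E.g. sgt->nents = 13 gives ncmds = 3 and a final naddr of
 * 13 % 5 = 3, while an exact multiple of 5 keeps naddr at the full
 * HMA_MAX_ADDR_IN_CMD.
 */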
3491 memset(&hma_cmd, 0, sizeof(hma_cmd));
3492 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
3493 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3494 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
3496 hma_cmd.mode_to_pcie_params =
3497 htonl(FW_HMA_CMD_MODE_V(hma_mode) |
3498 FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
3500 /* HMA cmd size specified in MB */
3501 hma_cmd.naddr_size =
3502 htonl(FW_HMA_CMD_SIZE_V(hma_size) |
3503 FW_HMA_CMD_NADDR_V(naddr));
3505 /* Total Page size specified in units of 4K */
3506 hma_cmd.addr_size_pkd =
3507 htonl(FW_HMA_CMD_ADDR_SIZE_V
3508 ((page_size << page_order) >> 12));
3510 /* Fill in up to HMA_MAX_ADDR_IN_CMD (5) addresses */
3511 for (j = 0; j < naddr; j++) {
3512 hma_cmd.phy_address[j] =
3513 cpu_to_be64(adapter->hma.phy_addr[j + k]);
3515 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
3516 sizeof(hma_cmd), &hma_cmd);
3518 dev_err(adapter->pdev_dev,
3519 "HMA FW command failed with err %d\n", ret);
3525 dev_info(adapter->pdev_dev,
3526 "Reserved %uMB host memory for HMA\n", hma_size);
3530 adap_free_hma_mem(adapter);
3534 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3539 /* Now that we've successfully configured and initialized the adapter
3540 * we can ask the Firmware what resources it has provisioned for us.
3542 ret = t4_get_pfres(adap);
3544 dev_err(adap->pdev_dev,
3545 "Unable to retrieve resource provisioning information\n");
3549 /* get device capabilities */
3550 memset(c, 0, sizeof(*c));
3551 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3552 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3553 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3554 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3558 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3559 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3560 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3564 ret = t4_config_glbl_rss(adap, adap->pf,
3565 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3566 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3567 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3571 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3572 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3579 /* tweak some settings */
3580 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3581 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3582 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3583 v = t4_read_reg(adap, TP_PIO_DATA_A);
3584 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3586 /* first 4 Tx modulation queues point to consecutive Tx channels */
3587 adap->params.tp.tx_modq_map = 0xE4;
3588 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3589 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3591 /* associate each Tx modulation queue with consecutive Tx channels */
3593 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3594 &v, 1, TP_TX_SCHED_HDR_A);
3595 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3596 &v, 1, TP_TX_SCHED_FIFO_A);
3597 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3598 &v, 1, TP_TX_SCHED_PCMD_A);
3600 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3601 if (is_offload(adap)) {
3602 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3603 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3604 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3605 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3606 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3607 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3608 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3609 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3610 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3611 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3614 /* get basic stuff going */
3615 return t4_early_init(adap, adap->pf);
3619 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3621 #define MAX_ATIDS 8192U
3624 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3626 * If the firmware we're dealing with has Configuration File support, then
3627 * we use that to perform all configuration
3631 * Tweak configuration based on module parameters, etc. Most of these have
3632 * defaults assigned to them by Firmware Configuration Files (if we're using
3633 * them) but need to be explicitly set if we're using hard-coded
3634 * initialization. But even in the case of using Firmware Configuration
3635 * Files, we'd like to expose the ability to change these via module
3636 * parameters so these are essentially common tweaks/settings for
3637 * Configuration Files and hard-coded initialization ...
3639 static int adap_init0_tweaks(struct adapter *adapter)
3642 * Fix up various Host-Dependent Parameters like Page Size, Cache
3643 * Line Size, etc. The firmware default is for a 4KB Page Size and
3644 * 64B Cache Line Size ...
3646 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3649 * Process module parameters which affect early initialization.
3651 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3652 dev_err(&adapter->pdev->dev,
3653 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3657 t4_set_reg_field(adapter, SGE_CONTROL_A,
3658 PKTSHIFT_V(PKTSHIFT_M),
3659 PKTSHIFT_V(rx_dma_offset));
3662 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3663 * adds the pseudo header itself.
3665 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3666 CSUM_HAS_PSEUDO_HDR_F, 0);
3671 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
3672 * unto themselves and they contain their own firmware to perform their
3673 * tasks ...
3675 static int phy_aq1202_version(const u8 *phy_fw_data,
3680 /* At offset 0x8 you're looking for the primary image's
3681 * starting offset which is 3 Bytes wide
3683 * At offset 0xa of the primary image, you look for the offset
3684 * of the DRAM segment which is 3 Bytes wide.
3686 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3689 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3690 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3691 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
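/* E.g. for bytes {0x12, 0x34, 0x56}: be16 = 0x1234, le16 = 0x3412
 * and le24 = 0x563412.
 */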
3693 offset = le24(phy_fw_data + 0x8) << 12;
3694 offset = le24(phy_fw_data + offset + 0xa);
3695 return be16(phy_fw_data + offset + 0x27e);
3702 static struct info_10gbt_phy_fw {
3703 unsigned int phy_fw_id; /* PCI Device ID */
3704 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
3705 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3706 int phy_flash; /* Has FLASH for PHY Firmware */
3707 } phy_info_array[] = {
3709 PHY_AQ1202_DEVICEID,
3710 PHY_AQ1202_FIRMWARE,
3715 PHY_BCM84834_DEVICEID,
3716 PHY_BCM84834_FIRMWARE,
3723 static struct info_10gbt_phy_fw *find_phy_info(int devid)
3727 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3728 if (phy_info_array[i].phy_fw_id == devid)
3729 return &phy_info_array[i];
3734 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
3735 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
3736 * we return a negative error number. If we transfer new firmware we return 1
3737 * (from t4_load_phy_fw()). If we don't do anything we return 0.
3739 static int adap_init0_phy(struct adapter *adap)
3741 const struct firmware *phyf;
3743 struct info_10gbt_phy_fw *phy_info;
3745 /* Use the device ID to determine which PHY file to flash.
3747 phy_info = find_phy_info(adap->pdev->device);
3749 dev_warn(adap->pdev_dev,
3750 "No PHY Firmware file found for this PHY\n");
3754 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3755 * use that. The adapter firmware provides us with a memory buffer
3756 * where we can load a PHY firmware file from the host if we want to
3757 * override the PHY firmware file in flash.
3759 ret = reject_firmware_direct(&phyf, phy_info->phy_fw_file,
3762 /* For adapters without FLASH attached to PHY for their
3763 * firmware, it's obviously a fatal error if we can't get the
3764 * firmware to the adapter. For adapters with PHY firmware
3765 * FLASH storage, it's worth a warning if we can't find the
3766 * PHY Firmware but we'll neuter the error ...
3768 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3769 "/lib/firmware/%s, error %d\n",
3770 phy_info->phy_fw_file, -ret);
3771 if (phy_info->phy_flash) {
3772 int cur_phy_fw_ver = 0;
3774 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3775 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
3776 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3783 /* Load PHY Firmware onto adapter.
3785 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3786 phy_info->phy_fw_version,
3787 (u8 *)phyf->data, phyf->size);
3789 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3792 int new_phy_fw_ver = 0;
3794 if (phy_info->phy_fw_version)
3795 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3797 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3798 "Firmware /lib/firmware/%s, version %#x\n",
3799 phy_info->phy_fw_file, new_phy_fw_ver);
3802 release_firmware(phyf);
3808 * Attempt to initialize the adapter via a Firmware Configuration File.
3810 static int adap_init0_config(struct adapter *adapter, int reset)
3812 struct fw_caps_config_cmd caps_cmd;
3813 const struct firmware *cf;
3814 unsigned long mtype = 0, maddr = 0;
3815 u32 finiver, finicsum, cfcsum;
3817 int config_issued = 0;
3818 char *fw_config_file, fw_config_file_path[256];
3819 char *config_name = NULL;
3822 * Reset device if necessary.
3825 ret = t4_fw_reset(adapter, adapter->mbox,
3826 PIORSTMODE_F | PIORST_F);
3831 /* If this is a 10Gb/s-BT adapter make sure the chip-external
3832 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
3833 * to be performed after any global adapter RESET above since some
3834 * PHYs only have local RAM copies of the PHY firmware.
3836 if (is_10gbt_device(adapter->pdev->device)) {
3837 ret = adap_init0_phy(adapter);
3842 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3843 * then use that. Otherwise, use the configuration file stored
3844 * in the adapter flash ...
3846 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3848 fw_config_file = FW4_CFNAME;
3851 fw_config_file = FW5_CFNAME;
3854 fw_config_file = FW6_CFNAME;
3857 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3858 adapter->pdev->device);
3863 ret = reject_firmware(&cf, fw_config_file, adapter->pdev_dev);
3865 config_name = "On FLASH";
3866 mtype = FW_MEMTYPE_CF_FLASH;
3867 maddr = t4_flash_cfg_addr(adapter);
3869 u32 params[7], val[7];
3871 sprintf(fw_config_file_path,
3872 "/lib/firmware/%s", fw_config_file);
3873 config_name = fw_config_file_path;
3875 if (cf->size >= FLASH_CFG_MAX_SIZE)
3878 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3879 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3880 ret = t4_query_params(adapter, adapter->mbox,
3881 adapter->pf, 0, 1, params, val);
3884 * For t4_memory_rw() below addresses and
3885 * sizes have to be in terms of multiples of 4
3886 * bytes. So, if the Configuration File isn't
3887 * a multiple of 4 bytes in length we'll have
3888 * to write that out separately since we can't
3889 * guarantee that the bytes following the
3890 * residual byte in the buffer returned by
3891 * reject_firmware() are zeroed out ...
3893 size_t resid = cf->size & 0x3;
3894 size_t size = cf->size & ~0x3;
3895 __be32 *data = (__be32 *)cf->data;
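/* E.g. a 1027-byte Configuration File yields size = 1024 and
 * resid = 3: the aligned 1024 bytes go out in one t4_memory_rw()
 * and the trailing 3 bytes are zero-padded into a final 4-byte
 * word below.
 */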
3897 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3898 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3900 spin_lock(&adapter->win0_lock);
3901 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3902 size, data, T4_MEMORY_WRITE);
3903 if (ret == 0 && resid != 0) {
3910 last.word = data[size >> 2];
3911 for (i = resid; i < 4; i++)
3912 last.buf[i] = 0;
3913 ret = t4_memory_rw(adapter, 0, mtype,
3918 spin_unlock(&adapter->win0_lock);
3922 release_firmware(cf);
3928 * Issue a Capability Configuration command to the firmware to get it
3929 * to parse the Configuration File. We don't use t4_fw_config_file()
3930 * because we want the ability to modify various features after we've
3931 * processed the configuration file ...
3933 memset(&caps_cmd, 0, sizeof(caps_cmd));
3934 caps_cmd.op_to_write =
3935 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3938 caps_cmd.cfvalid_to_len16 =
3939 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3940 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3941 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3942 FW_LEN16(caps_cmd));
3943 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3946 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3947 * Configuration File in FLASH), our last gasp effort is to use the
3948 * Firmware Configuration File which is embedded in the firmware. A
3949 * very few early versions of the firmware didn't have one embedded
3950 * but we can ignore those.
3952 if (ret == -ENOENT) {
3953 memset(&caps_cmd, 0, sizeof(caps_cmd));
3954 caps_cmd.op_to_write =
3955 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3958 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3959 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3960 sizeof(caps_cmd), &caps_cmd);
3961 config_name = "Firmware Default";
3968 finiver = ntohl(caps_cmd.finiver);
3969 finicsum = ntohl(caps_cmd.finicsum);
3970 cfcsum = ntohl(caps_cmd.cfcsum);
3971 if (finicsum != cfcsum)
3972 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3973 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3977 * And now tell the firmware to use the configuration we just loaded.
3979 caps_cmd.op_to_write =
3980 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3983 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3984 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3990 * Tweak configuration based on system architecture, module
3991 * parameters, etc.
3993 ret = adap_init0_tweaks(adapter);
3997 /* We will proceed even if HMA init fails. */
3998 ret = adap_config_hma(adapter);
4000 dev_err(adapter->pdev_dev,
4001 "HMA configuration failed with error %d\n", ret);
4004 * And finally tell the firmware to initialize itself using the
4005 * parameters from the Configuration File.
4007 ret = t4_fw_initialize(adapter, adapter->mbox);
4011 /* Emit Firmware Configuration File information and return
4012 * successfully.
4014 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4015 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4016 config_name, finiver, cfcsum);
4020 * Something bad happened. Return the error ... (If the "error"
4021 * is that there's no Configuration File on the adapter we don't
4022 * want to issue a warning since this is fairly common.)
4025 if (config_issued && ret != -ENOENT)
4026 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4031 static struct fw_info fw_info_array[] = {
4034 .fs_name = FW4_CFNAME,
4035 .fw_mod_name = FW4_FNAME,
4037 .chip = FW_HDR_CHIP_T4,
4038 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4039 .intfver_nic = FW_INTFVER(T4, NIC),
4040 .intfver_vnic = FW_INTFVER(T4, VNIC),
4041 .intfver_ri = FW_INTFVER(T4, RI),
4042 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4043 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4047 .fs_name = FW5_CFNAME,
4048 .fw_mod_name = FW5_FNAME,
4050 .chip = FW_HDR_CHIP_T5,
4051 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4052 .intfver_nic = FW_INTFVER(T5, NIC),
4053 .intfver_vnic = FW_INTFVER(T5, VNIC),
4054 .intfver_ri = FW_INTFVER(T5, RI),
4055 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4056 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4060 .fs_name = FW6_CFNAME,
4061 .fw_mod_name = FW6_FNAME,
4063 .chip = FW_HDR_CHIP_T6,
4064 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4065 .intfver_nic = FW_INTFVER(T6, NIC),
4066 .intfver_vnic = FW_INTFVER(T6, VNIC),
4067 .intfver_ofld = FW_INTFVER(T6, OFLD),
4068 .intfver_ri = FW_INTFVER(T6, RI),
4069 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4070 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4071 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4072 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4078 static struct fw_info *find_fw_info(int chip)
4082 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4083 if (fw_info_array[i].chip == chip)
4084 return &fw_info_array[i];
4090 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4092 static int adap_init0(struct adapter *adap)
4096 enum dev_state state;
4097 u32 params[7], val[7];
4098 struct fw_caps_config_cmd caps_cmd;
4101 /* Grab Firmware Device Log parameters as early as possible so we have
4102 * access to it for debugging, etc.
4104 ret = t4_init_devlog_params(adap);
4108 /* Contact FW, advertising Master capability */
4109 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4110 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4112 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4116 if (ret == adap->mbox)
4117 adap->flags |= MASTER_PF;
4120 * If we're the Master PF Driver and the device is uninitialized,
4121 * then let's consider upgrading the firmware ... (We always want
4122 * to check the firmware version number in order to A. get it for
4123 * later reporting and B. to warn if the currently loaded firmware
4124 * is excessively mismatched relative to the driver.)
4127 t4_get_version_info(adap);
4128 ret = t4_check_fw_version(adap);
4129 /* If firmware is too old (not supported by driver) force an update. */
4131 state = DEV_STATE_UNINIT;
4132 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4133 struct fw_info *fw_info;
4134 struct fw_hdr *card_fw;
4135 const struct firmware *fw;
4136 const u8 *fw_data = NULL;
4137 unsigned int fw_size = 0;
4139 /* This is the firmware whose headers the driver was compiled
4140 * against.
4142 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4143 if (fw_info == NULL) {
4144 dev_err(adap->pdev_dev,
4145 "unable to get firmware info for chip %d.\n",
4146 CHELSIO_CHIP_VERSION(adap->params.chip));
4150 /* allocate memory to read the header of the firmware on the
4151 * adapter
4153 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4159 /* Get FW from /lib/firmware/ */
4160 ret = reject_firmware(&fw, fw_info->fw_mod_name,
4163 dev_err(adap->pdev_dev,
4164 "unable to load firmware image %s, error %d\n",
4165 fw_info->fw_mod_name, ret);
4171 /* upgrade FW logic */
4172 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4176 release_firmware(fw);
4183 /* If the firmware is initialized already, emit a simple note to that
4184 * effect. Otherwise, it's time to try initializing the adapter.
4186 if (state == DEV_STATE_INIT) {
4187 ret = adap_config_hma(adap);
4189 dev_err(adap->pdev_dev,
4190 "HMA configuration failed with error %d\n",
4192 dev_info(adap->pdev_dev, "Coming up as %s: "\
4193 "Adapter already initialized\n",
4194 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4196 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4197 "Initializing adapter\n");
4199 /* Find out whether we're dealing with a version of the
4200 * firmware which has configuration file support.
4202 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4203 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4204 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4207 /* If the firmware doesn't support Configuration Files,
4208 * return an error.
4211 dev_err(adap->pdev_dev, "firmware doesn't support "
4212 "Firmware Configuration Files\n");
4216 /* The firmware provides us with a memory buffer where we can
4217 * load a Configuration File from the host if we want to
4218 * override the Configuration File in flash.
4220 ret = adap_init0_config(adap, reset);
4221 if (ret == -ENOENT) {
4222 dev_err(adap->pdev_dev, "no Configuration File "
4223 "present on adapter.\n");
4227 dev_err(adap->pdev_dev, "could not initialize "
4228 "adapter, error %d\n", -ret);
4233 /* Now that we've successfully configured and initialized the adapter
4234 * (or found it already initialized), we can ask the Firmware what
4235 * resources it has provisioned for us.
4237 ret = t4_get_pfres(adap);
4239 dev_err(adap->pdev_dev,
4240 "Unable to retrieve resource provisioning information\n");
4244 /* Grab VPD parameters. This should be done after we establish a
4245 * connection to the firmware since some of the VPD parameters
4246 * (notably the Core Clock frequency) are retrieved via requests to
4247 * the firmware. On the other hand, we need these fairly early on
4248 * so we do this right after getting ahold of the firmware.
4250 * We need to do this after initializing the adapter because someone
4251 * could have FLASHed a new VPD which won't be read by the firmware
4252 * until we do the RESET ...
4254 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4258 /* Find out what ports are available to us. Note that we need to do
4259 * this before calling adap_init0_no_config() since it needs nports
4260 * and portvec ...
4263 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4264 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4265 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4269 adap->params.nports = hweight32(port_vec);
4270 adap->params.portvec = port_vec;
4272 /* Give the SGE code a chance to pull in anything that it needs ...
4273 * Note that this must be called after we retrieve our VPD parameters
4274 * in order to know how to convert core ticks to seconds, etc.
4276 ret = t4_sge_init(adap);
4280 if (is_bypass_device(adap->pdev->device))
4281 adap->params.bypass = 1;
4284 * Grab some of our basic fundamental operating parameters.
4286 #define FW_PARAM_DEV(param) \
4287 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
4288 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
4290 #define FW_PARAM_PFVF(param) \
4291 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
4292 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
4293 FW_PARAMS_PARAM_Y_V(0) | \
4294 FW_PARAMS_PARAM_Z_V(0)
4296 params[0] = FW_PARAM_PFVF(EQ_START);
4297 params[1] = FW_PARAM_PFVF(L2T_START);
4298 params[2] = FW_PARAM_PFVF(L2T_END);
4299 params[3] = FW_PARAM_PFVF(FILTER_START);
4300 params[4] = FW_PARAM_PFVF(FILTER_END);
4301 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4302 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4305 adap->sge.egr_start = val[0];
4306 adap->l2t_start = val[1];
4307 adap->l2t_end = val[2];
4308 adap->tids.ftid_base = val[3];
4309 adap->tids.nftids = val[4] - val[3] + 1;
4310 adap->sge.ingr_start = val[5];
4312 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4313 /* Read the raw mps entries. In T6, the last 2 tcam entries
4314 * are reserved for raw mac addresses (rawf = 2, one per port).
4316 params[0] = FW_PARAM_PFVF(RAWF_START);
4317 params[1] = FW_PARAM_PFVF(RAWF_END);
4318 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4321 adap->rawf_start = val[0];
4322 adap->rawf_cnt = val[1] - val[0] + 1;
4326 /* qids (ingress/egress) returned from firmware can be anywhere
4327 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
4328 * Hence the driver needs to allocate memory for this range to
4329 * store the queue info. Get the highest IQFLINT/EQ index returned
4330 * in FW_EQ_*_CMD.alloc command.
4332 params[0] = FW_PARAM_PFVF(EQ_END);
4333 params[1] = FW_PARAM_PFVF(IQFLINT_END);
4334 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4337 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
4338 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
4340 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
4341 sizeof(*adap->sge.egr_map), GFP_KERNEL);
4342 if (!adap->sge.egr_map) {
4347 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
4348 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
4349 if (!adap->sge.ingr_map) {
4354 /* Allocate the memory for the various egress queue bitmaps,
4355 * i.e. starving_fl, txq_maperr and blocked_fl.
4357 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4358 sizeof(long), GFP_KERNEL);
4359 if (!adap->sge.starving_fl) {
4364 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4365 sizeof(long), GFP_KERNEL);
4366 if (!adap->sge.txq_maperr) {
4371 #ifdef CONFIG_DEBUG_FS
4372 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4373 sizeof(long), GFP_KERNEL);
4374 if (!adap->sge.blocked_fl) {
4380 params[0] = FW_PARAM_PFVF(CLIP_START);
4381 params[1] = FW_PARAM_PFVF(CLIP_END);
4382 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4385 adap->clipt_start = val[0];
4386 adap->clipt_end = val[1];
4388 /* We don't yet have a PARAMs call to retrieve the number of Traffic
4389 * Classes supported by the hardware/firmware so we hard code it here
4392 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
4394 /* query params related to active filter region */
4395 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4396 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4397 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4398 /* If the Active filter region is non-empty, we enable establishing
4399 * offload connections through firmware work requests.
4401 if ((val[0] != val[1]) && (ret >= 0)) {
4402 adap->flags |= FW_OFLD_CONN;
4403 adap->tids.aftid_base = val[0];
4404 adap->tids.aftid_end = val[1];
4407 /* If we're running on newer firmware, let it know that we're
4408 * prepared to deal with encapsulated CPL messages. Older
4409 * firmware won't understand this and we'll just get
4410 * unencapsulated messages ...
4412 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4414 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4417 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4418 * capability. Earlier versions of the firmware didn't have the
4419 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4420 * permission to use ULPTX MEMWRITE DSGL.
4422 if (is_t4(adap->params.chip)) {
4423 adap->params.ulptx_memwrite_dsgl = false;
4425 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4426 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4428 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4431 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
4432 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4433 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4435 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
4437 /* See if FW supports FW_FILTER2 work request */
4438 if (is_t4(adap->params.chip)) {
4439 adap->params.filter2_wr_support = 0;
4441 params[0] = FW_PARAM_DEV(FILTER2_WR);
4442 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4444 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
4448 * Get device capabilities so we can determine what resources we need
4449 * to manage.
4451 memset(&caps_cmd, 0, sizeof(caps_cmd));
4452 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4453 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4454 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4455 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4460 if (caps_cmd.ofldcaps ||
4461 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
4462 /* query offload-related parameters */
4463 params[0] = FW_PARAM_DEV(NTID);
4464 params[1] = FW_PARAM_PFVF(SERVER_START);
4465 params[2] = FW_PARAM_PFVF(SERVER_END);
4466 params[3] = FW_PARAM_PFVF(TDDP_START);
4467 params[4] = FW_PARAM_PFVF(TDDP_END);
4468 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4469 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4473 adap->tids.ntids = val[0];
4474 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4475 adap->tids.stid_base = val[1];
4476 adap->tids.nstids = val[2] - val[1] + 1;
4478 * Setup server filter region. Divide the available filter
4479 * region into two parts. Regular filters get 1/3rd and server
4480 * filters get 2/3rd part. This is only enabled if the workaround
4481 * path is enabled.
4482 * 1. For regular filters.
4483 * 2. Server filters: these are special filters which are used
4484 * to redirect SYN packets to the offload queue.
4486 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4487 adap->tids.sftid_base = adap->tids.ftid_base +
4488 DIV_ROUND_UP(adap->tids.nftids, 3);
4489 adap->tids.nsftids = adap->tids.nftids -
4490 DIV_ROUND_UP(adap->tids.nftids, 3);
4491 adap->tids.nftids = adap->tids.sftid_base -
4492 adap->tids.ftid_base;
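/* Worked example of the split above (hypothetical numbers): with
 * nftids = 90 and DIV_ROUND_UP(90, 3) = 30, we get
 * sftid_base = ftid_base + 30, nsftids = 60 (server filters take the
 * upper 2/3rd) and nftids = 30 (regular filters keep the lower 1/3rd).
 */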
4494 adap->vres.ddp.start = val[3];
4495 adap->vres.ddp.size = val[4] - val[3] + 1;
4496 adap->params.ofldq_wr_cred = val[5];
4498 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4499 ret = init_hash_filter(adap);
4503 adap->params.offload = 1;
4504 adap->num_ofld_uld += 1;
4507 if (caps_cmd.rdmacaps) {
4508 params[0] = FW_PARAM_PFVF(STAG_START);
4509 params[1] = FW_PARAM_PFVF(STAG_END);
4510 params[2] = FW_PARAM_PFVF(RQ_START);
4511 params[3] = FW_PARAM_PFVF(RQ_END);
4512 params[4] = FW_PARAM_PFVF(PBL_START);
4513 params[5] = FW_PARAM_PFVF(PBL_END);
4514 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4518 adap->vres.stag.start = val[0];
4519 adap->vres.stag.size = val[1] - val[0] + 1;
4520 adap->vres.rq.start = val[2];
4521 adap->vres.rq.size = val[3] - val[2] + 1;
4522 adap->vres.pbl.start = val[4];
4523 adap->vres.pbl.size = val[5] - val[4] + 1;
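/* The [start, end] bounds returned by firmware are inclusive, which is
 * why every size here is computed as end - start + 1.  A sketch of the
 * convention (set_fw_range() is hypothetical and not part of this
 * driver; it assumes a start/size pair like struct cxgb4_range):
 *
 *	static void set_fw_range(struct cxgb4_range *r, u32 start, u32 end)
 *	{
 *		r->start = start;
 *		r->size = end - start + 1;
 *	}
 */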
4525 params[0] = FW_PARAM_PFVF(SRQ_START);
4526 params[1] = FW_PARAM_PFVF(SRQ_END);
4527 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4530 adap->vres.srq.start = val[0];
4531 adap->vres.srq.size = val[1] - val[0] + 1;
4533 if (adap->vres.srq.size) {
4534 adap->srq = t4_init_srq(adap->vres.srq.size);
4536 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
4539 params[0] = FW_PARAM_PFVF(SQRQ_START);
4540 params[1] = FW_PARAM_PFVF(SQRQ_END);
4541 params[2] = FW_PARAM_PFVF(CQ_START);
4542 params[3] = FW_PARAM_PFVF(CQ_END);
4543 params[4] = FW_PARAM_PFVF(OCQ_START);
4544 params[5] = FW_PARAM_PFVF(OCQ_END);
4545 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4549 adap->vres.qp.start = val[0];
4550 adap->vres.qp.size = val[1] - val[0] + 1;
4551 adap->vres.cq.start = val[2];
4552 adap->vres.cq.size = val[3] - val[2] + 1;
4553 adap->vres.ocq.start = val[4];
4554 adap->vres.ocq.size = val[5] - val[4] + 1;
4556 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4557 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4558 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
4561 adap->params.max_ordird_qp = 8;
4562 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4565 adap->params.max_ordird_qp = val[0];
4566 adap->params.max_ird_adapter = val[1];
4568 dev_info(adap->pdev_dev,
4569 "max_ordird_qp %d max_ird_adapter %d\n",
4570 adap->params.max_ordird_qp,
4571 adap->params.max_ird_adapter);
4573 /* Enable write_with_immediate if FW supports it */
4574 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
4575 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4577 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
4579 /* Enable write_cmpl if FW supports it */
4580 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
4581 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
4583 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
4584 adap->num_ofld_uld += 2;
4586 if (caps_cmd.iscsicaps) {
4587 params[0] = FW_PARAM_PFVF(ISCSI_START);
4588 params[1] = FW_PARAM_PFVF(ISCSI_END);
4589 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4593 adap->vres.iscsi.start = val[0];
4594 adap->vres.iscsi.size = val[1] - val[0] + 1;
4595 /* LIO target and cxgb4i initiator */
4596 adap->num_ofld_uld += 2;
4598 if (caps_cmd.cryptocaps) {
4599 if (ntohs(caps_cmd.cryptocaps) &
4600 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
4601 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
4602 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4608 adap->vres.ncrypto_fc = val[0];
4610 adap->num_ofld_uld += 1;
4612 if (ntohs(caps_cmd.cryptocaps) &
4613 FW_CAPS_CONFIG_TLS_INLINE) {
4614 params[0] = FW_PARAM_PFVF(TLS_START);
4615 params[1] = FW_PARAM_PFVF(TLS_END);
4616 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4620 adap->vres.key.start = val[0];
4621 adap->vres.key.size = val[1] - val[0] + 1;
4624 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
4626 #undef FW_PARAM_PFVF
4629 /* The MTU/MSS Table is initialized by now, so load its values. If
4630 * we're initializing the adapter, then we'll make any modifications
4631 * we want to the MTU/MSS Table and also initialize the congestion
4634 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4635 if (state != DEV_STATE_INIT) {
4638 /* The default MTU Table contains values 1492 and 1500.
4639 * However, for TCP, it's better to have two values which are
4640 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4641 * This allows us to have a TCP Data Payload which is a
4642 * multiple of 8 regardless of what combination of TCP Options
4643 * are in use (always a multiple of 4 bytes) which is
4644 * important for performance reasons. For instance, if no
4645 * options are in use, then we have a 20-byte IP header and a
4646 * 20-byte TCP header. In this case, a 1500-byte MTU would
4647 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes,
4648 * which is not a multiple of 8. So using an MTU of 1488 in
4649 * this case results in a TCP Data Payload of 1448 bytes, which
4650 * is a multiple of 8. On the other hand, if 12-byte TCP Time
4651 * Stamps have been negotiated, then an MTU of 1500 bytes
4652 * results in a TCP Data Payload of 1448 bytes which, as
4653 * above, is a multiple of 8 bytes ...
4655 for (i = 0; i < NMTUS; i++)
4656 if (adap->params.mtus[i] == 1492) {
4657 adap->params.mtus[i] = 1488;
4661 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4662 adap->params.b_wnd);
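/* Worked example of the arithmetic described above (illustrative):
 *	MTU 1500, no options:     1500 - 20 - 20      = 1460 (not 8-aligned)
 *	MTU 1488, no options:     1488 - 20 - 20      = 1448 = 181 * 8
 *	MTU 1500, 12B timestamps: 1500 - 20 - 20 - 12 = 1448 = 181 * 8
 */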
4664 t4_init_sge_params(adap);
4665 adap->flags |= FW_OK;
4666 t4_init_tp_params(adap, true);
4670 * Something bad happened. If a command timed out or failed with EIO,
4671 * the FW is not operating within its spec or something catastrophic
4672 * happened to the HW/FW; stop issuing commands.
4675 adap_free_hma_mem(adap);
4676 kfree(adap->sge.egr_map);
4677 kfree(adap->sge.ingr_map);
4678 kfree(adap->sge.starving_fl);
4679 kfree(adap->sge.txq_maperr);
4680 #ifdef CONFIG_DEBUG_FS
4681 kfree(adap->sge.blocked_fl);
4683 if (ret != -ETIMEDOUT && ret != -EIO)
4684 t4_fw_bye(adap, adap->mbox);
4690 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4691 pci_channel_state_t state)
4694 struct adapter *adap = pci_get_drvdata(pdev);
4700 adap->flags &= ~FW_OK;
4701 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4702 spin_lock(&adap->stats_lock);
4703 for_each_port(adap, i) {
4704 struct net_device *dev = adap->port[i];
4706 netif_device_detach(dev);
4707 netif_carrier_off(dev);
4710 spin_unlock(&adap->stats_lock);
4711 disable_interrupts(adap);
4712 if (adap->flags & FULL_INIT_DONE)
4715 if ((adap->flags & DEV_ENABLED)) {
4716 pci_disable_device(pdev);
4717 adap->flags &= ~DEV_ENABLED;
4719 out: return state == pci_channel_io_perm_failure ?
4720 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4723 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4726 struct fw_caps_config_cmd c;
4727 struct adapter *adap = pci_get_drvdata(pdev);
4730 pci_restore_state(pdev);
4731 pci_save_state(pdev);
4732 return PCI_ERS_RESULT_RECOVERED;
4735 if (!(adap->flags & DEV_ENABLED)) {
4736 if (pci_enable_device(pdev)) {
4737 dev_err(&pdev->dev, "Cannot reenable PCI "
4738 "device after reset\n");
4739 return PCI_ERS_RESULT_DISCONNECT;
4741 adap->flags |= DEV_ENABLED;
4744 pci_set_master(pdev);
4745 pci_restore_state(pdev);
4746 pci_save_state(pdev);
4747 pci_cleanup_aer_uncorrect_error_status(pdev);
4749 if (t4_wait_dev_ready(adap->regs) < 0)
4750 return PCI_ERS_RESULT_DISCONNECT;
4751 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4752 return PCI_ERS_RESULT_DISCONNECT;
4753 adap->flags |= FW_OK;
4754 if (adap_init1(adap, &c))
4755 return PCI_ERS_RESULT_DISCONNECT;
4757 for_each_port(adap, i) {
4758 struct port_info *p = adap2pinfo(adap, i);
4760 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4763 return PCI_ERS_RESULT_DISCONNECT;
4765 p->xact_addr_filt = -1;
4768 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4769 adap->params.b_wnd);
4772 return PCI_ERS_RESULT_DISCONNECT;
4773 return PCI_ERS_RESULT_RECOVERED;
4776 static void eeh_resume(struct pci_dev *pdev)
4779 struct adapter *adap = pci_get_drvdata(pdev);
4785 for_each_port(adap, i) {
4786 struct net_device *dev = adap->port[i];
4788 if (netif_running(dev)) {
4790 cxgb_set_rxmode(dev);
4792 netif_device_attach(dev);
4798 static const struct pci_error_handlers cxgb4_eeh = {
4799 .error_detected = eeh_err_detected,
4800 .slot_reset = eeh_slot_reset,
4801 .resume = eeh_resume,
4804 /* Return true if the Link Configuration supports "High Speeds" (those greater
4807 static inline bool is_x_10g_port(const struct link_config *lc)
4809 unsigned int speeds, high_speeds;
4811 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
4812 high_speeds = speeds &
4813 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
4815 return high_speeds != 0;
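/* For example, pcaps advertising 1G and 25G yields speeds = (1G | 25G);
 * masking off the 100M and 1G bits leaves the 25G bit set, so the port
 * counts as a "high speed" (10G or better) port.
 */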
4819 * Perform default configuration of DMA queues depending on the number and type
4820 * of ports we found and the number of available CPUs. Most settings can be
4821 * modified by the admin prior to actual use.
4823 static int cfg_queues(struct adapter *adap)
4825 struct sge *s = &adap->sge;
4826 int i, n10g = 0, qidx = 0;
4827 int niqflint, neq, avail_eth_qsets;
4828 int max_eth_qsets = 32;
4829 #ifndef CONFIG_CHELSIO_T4_DCB
4833 /* Reduce memory usage in kdump environment by disabling all offload.
4835 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
4836 adap->params.offload = 0;
4837 adap->params.crypto = 0;
4840 /* Calculate the number of Ethernet Queue Sets available based on
4841 * resources provisioned for us. We always have an Asynchronous
4842 * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
4843 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
4844 * Ingress Queue. Meanwhile, we need two Egress Queues for each
4845 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
4847 * Note that we should also take into account all of the various
4848 * Offload Queues. But, in any situation where we're operating in
4849 * a Resource Constrained Provisioning environment, doing any Offload
4850 * at all is problematic ...
4852 niqflint = adap->params.pfres.niqflint - 1;
4853 if (!(adap->flags & USING_MSIX))
4855 neq = adap->params.pfres.neq / 2;
4856 avail_eth_qsets = min(niqflint, neq);
4858 if (avail_eth_qsets > max_eth_qsets)
4859 avail_eth_qsets = max_eth_qsets;
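/* For instance (hypothetical provisioning): pfres.niqflint = 65 and
 * pfres.neq = 128 under MSI-X give niqflint = 64 after reserving the
 * FW event queue and neq / 2 = 64 Queue Sets worth of Egress Queues,
 * so avail_eth_qsets = min(64, 64) = 64, then clamped to the
 * max_eth_qsets cap of 32.
 */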
4861 if (avail_eth_qsets < adap->params.nports) {
4862 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
4863 avail_eth_qsets, adap->params.nports);
4867 /* Count the number of 10Gb/s or better ports */
4868 for_each_port(adap, i)
4869 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4871 #ifdef CONFIG_CHELSIO_T4_DCB
4872 /* For Data Center Bridging support we need to be able to support up
4873 * to 8 Traffic Priorities; each of which will be assigned to its
4874 * own TX Queue in order to prevent Head-Of-Line Blocking.
4876 if (adap->params.nports * 8 > avail_eth_qsets) {
4877 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
4878 avail_eth_qsets, adap->params.nports * 8);
4882 for_each_port(adap, i) {
4883 struct port_info *pi = adap2pinfo(adap, i);
4885 pi->first_qset = qidx;
4886 pi->nqsets = is_kdump_kernel() ? 1 : 8;
4889 #else /* !CONFIG_CHELSIO_T4_DCB */
4891 * We default to 1 queue per non-10G port and up to # of cores queues per 10G port.
4895 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
4896 if (q10g > netif_get_num_default_rss_queues())
4897 q10g = netif_get_num_default_rss_queues();
4899 if (is_kdump_kernel())
4902 for_each_port(adap, i) {
4903 struct port_info *pi = adap2pinfo(adap, i);
4905 pi->first_qset = qidx;
4906 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4909 #endif /* !CONFIG_CHELSIO_T4_DCB */
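/* Non-DCB example (hypothetical numbers): avail_eth_qsets = 32 with
 * nports = 4 and n10g = 2 gives q10g = (32 - 2) / 2 = 15, which is
 * then capped to netif_get_num_default_rss_queues() (8 on many
 * systems); each sub-10G port keeps a single Queue Set.
 */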
4912 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4916 * For offload we use 1 queue/channel if all ports are up to 1G,
4917 * otherwise we divide all available queues amongst the channels
4918 * capped by the number of available cores.
4921 i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4922 s->ofldqsets = roundup(i, adap->params.nports);
4924 s->ofldqsets = adap->params.nports;
4928 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4929 struct sge_eth_rxq *r = &s->ethrxq[i];
4931 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
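/* (Reads as: 5 usec holdoff timer, 10-packet count threshold, 1024
 * response-queue entries of 64 bytes each -- an assumption based on
 * init_rspq()'s (us, cnt, size, iqe_size) argument order.)
 */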
4935 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4936 s->ethtxq[i].q.size = 1024;
4938 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4939 s->ctrlq[i].q.size = 512;
4941 if (!is_t4(adap->params.chip))
4942 s->ptptxq.q.size = 8;
4944 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4945 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
4951 * Reduce the number of Ethernet queues across all ports to at most n.
4952 * n provides at least one queue per port.
4954 static void reduce_ethqs(struct adapter *adap, int n)
4957 struct port_info *pi;
4959 while (n < adap->sge.ethqsets)
4960 for_each_port(adap, i) {
4961 pi = adap2pinfo(adap, i);
4962 if (pi->nqsets > 1) {
4964 adap->sge.ethqsets--;
4965 if (adap->sge.ethqsets <= n)
4971 for_each_port(adap, i) {
4972 pi = adap2pinfo(adap, i);
4978 static int get_msix_info(struct adapter *adap)
4980 struct uld_msix_info *msix_info;
4981 unsigned int max_ingq = 0;
4983 if (is_offload(adap))
4984 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
4985 if (is_pci_uld(adap))
4986 max_ingq += MAX_OFLD_QSETS * adap->num_uld;
4991 msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
4995 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4996 sizeof(long), GFP_KERNEL);
4997 if (!adap->msix_bmap_ulds.msix_bmap) {
5001 spin_lock_init(&adap->msix_bmap_ulds.lock);
5002 adap->msix_info_ulds = msix_info;
5007 static void free_msix_info(struct adapter *adap)
5009 if (!(adap->num_uld && adap->num_ofld_uld))
5012 kfree(adap->msix_info_ulds);
5013 kfree(adap->msix_bmap_ulds.msix_bmap);
5016 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5017 #define EXTRA_VECS 2
5019 static int enable_msix(struct adapter *adap)
5021 int ofld_need = 0, uld_need = 0;
5022 int i, j, want, need, allocated;
5023 struct sge *s = &adap->sge;
5024 unsigned int nchan = adap->params.nports;
5025 struct msix_entry *entries;
5026 int max_ingq = MAX_INGQ;
5028 if (is_pci_uld(adap))
5029 max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
5030 if (is_offload(adap))
5031 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
5032 entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
5038 if (get_msix_info(adap)) {
5039 adap->params.offload = 0;
5040 adap->params.crypto = 0;
5043 for (i = 0; i < max_ingq + 1; ++i)
5044 entries[i].entry = i;
5046 want = s->max_ethqsets + EXTRA_VECS;
5047 if (is_offload(adap)) {
5048 want += adap->num_ofld_uld * s->ofldqsets;
5049 ofld_need = adap->num_ofld_uld * nchan;
5051 if (is_pci_uld(adap)) {
5052 want += adap->num_uld * s->ofldqsets;
5053 uld_need = adap->num_uld * nchan;
5055 #ifdef CONFIG_CHELSIO_T4_DCB
5056 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5059 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
5061 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
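/* Example (non-DCB, hypothetical numbers): max_ethqsets = 16, 2 ports
 * and two offload ULDs with ofldqsets = 8 give
 * want = 16 + 2 + 2 * 8 = 34 and need = 2 + 2 + 2 * 2 = 8, so any
 * allocation in [8, 34] vectors lets every group meet its minimum.
 */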
5063 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5064 if (allocated < 0) {
5065 dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
5066 " not using MSI-X\n");
5071 /* Distribute available vectors to the various queue groups.
5072 * Every group gets its minimum requirement and NIC gets top
5073 * priority for leftovers.
5075 i = allocated - EXTRA_VECS - ofld_need - uld_need;
5076 if (i < s->max_ethqsets) {
5077 s->max_ethqsets = i;
5078 if (i < s->ethqsets)
5079 reduce_ethqs(adap, i);
5082 if (allocated < want)
5083 s->nqs_per_uld = nchan;
5085 s->nqs_per_uld = s->ofldqsets;
5088 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
5089 adap->msix_info[i].vec = entries[i].vector;
5091 for (j = 0 ; i < allocated; ++i, j++) {
5092 adap->msix_info_ulds[j].vec = entries[i].vector;
5093 adap->msix_info_ulds[j].idx = i;
5095 adap->msix_bmap_ulds.mapsize = j;
5097 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
5098 "nic %d per uld %d\n",
5099 allocated, s->max_ethqsets, s->nqs_per_uld);
5107 static int init_rss(struct adapter *adap)
5112 err = t4_init_rss_mode(adap, adap->mbox);
5116 for_each_port(adap, i) {
5117 struct port_info *pi = adap2pinfo(adap, i);
5119 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5126 /* Dump basic information about the adapter */
5127 static void print_adapter_info(struct adapter *adapter)
5129 /* Hardware/Firmware/etc. Version/Revision IDs */
5130 t4_dump_version_info(adapter);
5132 /* Software/Hardware configuration */
5133 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
5134 is_offload(adapter) ? "R" : "",
5135 ((adapter->flags & USING_MSIX) ? "MSI-X" :
5136 (adapter->flags & USING_MSI) ? "MSI" : ""),
5137 is_offload(adapter) ? "Offload" : "non-Offload");
5140 static void print_port_info(const struct net_device *dev)
5144 const struct port_info *pi = netdev_priv(dev);
5145 const struct adapter *adap = pi->adapter;
5147 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5148 bufp += sprintf(bufp, "100M/");
5149 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5150 bufp += sprintf(bufp, "1G/");
5151 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
5152 bufp += sprintf(bufp, "10G/");
5153 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
5154 bufp += sprintf(bufp, "25G/");
5155 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
5156 bufp += sprintf(bufp, "40G/");
5157 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
5158 bufp += sprintf(bufp, "50G/");
5159 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
5160 bufp += sprintf(bufp, "100G/");
5161 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
5162 bufp += sprintf(bufp, "200G/");
5163 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
5164 bufp += sprintf(bufp, "400G/");
5167 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5169 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
5170 dev->name, adap->params.vpd.id, adap->name, buf);
5174 * Free the following resources:
5175 * - memory used for tables
5178 * - resources FW is holding for us
5180 static void free_some_resources(struct adapter *adapter)
5184 kvfree(adapter->mps_encap);
5185 kvfree(adapter->smt);
5186 kvfree(adapter->l2t);
5187 kvfree(adapter->srq);
5188 t4_cleanup_sched(adapter);
5189 kvfree(adapter->tids.tid_tab);
5190 cxgb4_cleanup_tc_flower(adapter);
5191 cxgb4_cleanup_tc_u32(adapter);
5192 kfree(adapter->sge.egr_map);
5193 kfree(adapter->sge.ingr_map);
5194 kfree(adapter->sge.starving_fl);
5195 kfree(adapter->sge.txq_maperr);
5196 #ifdef CONFIG_DEBUG_FS
5197 kfree(adapter->sge.blocked_fl);
5199 disable_msi(adapter);
5201 for_each_port(adapter, i)
5202 if (adapter->port[i]) {
5203 struct port_info *pi = adap2pinfo(adapter, i);
5206 t4_free_vi(adapter, adapter->mbox, adapter->pf,
5208 kfree(adap2pinfo(adapter, i)->rss);
5209 free_netdev(adapter->port[i]);
5211 if (adapter->flags & FW_OK)
5212 t4_fw_bye(adapter, adapter->pf);
5215 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5216 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5217 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5218 #define SEGMENT_SIZE 128
5220 static int t4_get_chip_type(struct adapter *adap, int ver)
5222 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
5226 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
5228 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5230 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
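/* CHELSIO_PCI_ID_VER() extracts the chip generation from the top
 * nibble of the PCI device ID, so e.g. a 0x6xxx device ID maps to a
 * T6 chip code here (illustrative reading of the macros).
 */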
5237 #ifdef CONFIG_PCI_IOV
5238 static void cxgb4_mgmt_setup(struct net_device *dev)
5240 dev->type = ARPHRD_NONE;
5242 dev->hard_header_len = 0;
5244 dev->tx_queue_len = 0;
5245 dev->flags |= IFF_NOARP;
5246 dev->priv_flags |= IFF_NO_QUEUE;
5248 /* Initialize the device structure. */
5249 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5250 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
5253 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5255 struct adapter *adap = pci_get_drvdata(pdev);
5257 int current_vfs = pci_num_vf(pdev);
5260 pcie_fw = readl(adap->regs + PCIE_FW_A);
5261 /* Check if fw is initialized */
5262 if (!(pcie_fw & PCIE_FW_INIT_F)) {
5263 dev_warn(&pdev->dev, "Device not initialized\n");
5267 /* If any of the VFs is already assigned to a guest OS, then
5268 * SR-IOV for the same cannot be modified
5270 if (current_vfs && pci_vfs_assigned(pdev)) {
5272 "Cannot modify SR-IOV while VFs are assigned\n");
5275 /* Note that the upper-level code ensures that we're never called with
5276 * a non-zero "num_vfs" when we already have VFs instantiated. But
5277 * it never hurts to code defensively.
5279 if (num_vfs != 0 && current_vfs != 0)
5282 /* Nothing to do for no change. */
5283 if (num_vfs == current_vfs)
5286 /* Disable SRIOV when zero is passed. */
5288 pci_disable_sriov(pdev);
5289 /* free VF Management Interface */
5290 unregister_netdev(adap->port[0]);
5291 free_netdev(adap->port[0]);
5292 adap->port[0] = NULL;
5294 /* free VF resources */
5296 kfree(adap->vfinfo);
5297 adap->vfinfo = NULL;
5302 struct fw_pfvf_cmd port_cmd, port_rpl;
5303 struct net_device *netdev;
5304 unsigned int pmask, port;
5305 struct pci_dev *pbridge;
5306 struct port_info *pi;
5307 char name[IFNAMSIZ];
5312 /* If we want to instantiate Virtual Functions, then our
5313 * parent bridge's PCIe link needs to support Alternative Routing
5314 * ID (ARI) because our VFs will show up at function offset 8 and above.
5317 pbridge = pdev->bus->self;
5318 pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
5319 pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
5320 pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
5322 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
5323 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
5324 /* Our parent bridge does not support ARI so issue a
5325 * warning and skip instantiating the VFs. They
5326 * won't be reachable.
5328 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
5329 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
5330 PCI_FUNC(pbridge->devfn));
5333 memset(&port_cmd, 0, sizeof(port_cmd));
5334 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
5337 FW_PFVF_CMD_PFN_V(adap->pf) |
5338 FW_PFVF_CMD_VFN_V(0));
5339 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
5340 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
5344 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
5345 port = ffs(pmask) - 1;
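/* pmask is a bitmap of the ports this PF may access; ffs() returns the
 * 1-based index of the lowest set bit, so e.g. pmask = 0x6 selects
 * port 1 for the management interface.
 */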
5346 /* Allocate VF Management Interface. */
5347 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
5349 netdev = alloc_netdev(sizeof(struct port_info),
5350 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
5354 pi = netdev_priv(netdev);
5358 SET_NETDEV_DEV(netdev, &pdev->dev);
5360 adap->port[0] = netdev;
5363 err = register_netdev(adap->port[0]);
5365 pr_info("Unable to register VF mgmt netdev %s\n", name);
5366 free_netdev(adap->port[0]);
5367 adap->port[0] = NULL;
5370 /* Allocate and set up VF Information. */
5371 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
5372 sizeof(struct vf_info), GFP_KERNEL);
5373 if (!adap->vfinfo) {
5374 unregister_netdev(adap->port[0]);
5375 free_netdev(adap->port[0]);
5376 adap->port[0] = NULL;
5379 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
5381 /* Instantiate the requested number of VFs. */
5382 err = pci_enable_sriov(pdev, num_vfs);
5384 pr_info("Unable to instantiate %d VFs\n", num_vfs);
5386 unregister_netdev(adap->port[0]);
5387 free_netdev(adap->port[0]);
5388 adap->port[0] = NULL;
5389 kfree(adap->vfinfo);
5390 adap->vfinfo = NULL;
5395 adap->num_vfs = num_vfs;
5398 #endif /* CONFIG_PCI_IOV */
5400 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5402 struct net_device *netdev;
5403 struct adapter *adapter;
5404 static int adap_idx = 1;
5405 int s_qpp, qpp, num_seg;
5406 struct port_info *pi;
5407 bool highdma = false;
5408 enum chip_type chip;
5415 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5417 err = pci_request_regions(pdev, KBUILD_MODNAME);
5419 /* Just info, some other driver may have claimed the device. */
5420 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5424 err = pci_enable_device(pdev);
5426 dev_err(&pdev->dev, "cannot enable PCI device\n");
5427 goto out_release_regions;
5430 regs = pci_ioremap_bar(pdev, 0);
5432 dev_err(&pdev->dev, "cannot map device registers\n");
5434 goto out_disable_device;
5437 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5440 goto out_unmap_bar0;
5443 adapter->regs = regs;
5444 err = t4_wait_dev_ready(regs);
5446 goto out_free_adapter;
5448 /* We control everything through one PF */
5449 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5450 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
5451 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
5452 if ((int)chip < 0) {
5453 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
5455 goto out_free_adapter;
5457 chip_ver = CHELSIO_CHIP_VERSION(chip);
5458 func = chip_ver <= CHELSIO_T5 ?
5459 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5461 adapter->pdev = pdev;
5462 adapter->pdev_dev = &pdev->dev;
5463 adapter->name = pci_name(pdev);
5464 adapter->mbox = func;
5466 adapter->params.chip = chip;
5467 adapter->adap_idx = adap_idx;
5468 adapter->msg_enable = DFLT_MSG_ENABLE;
5469 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5470 (sizeof(struct mbox_cmd) *
5471 T4_OS_LOG_MBOX_CMDS),
5473 if (!adapter->mbox_log) {
5475 goto out_free_adapter;
5477 spin_lock_init(&adapter->mbox_lock);
5478 INIT_LIST_HEAD(&adapter->mlist.list);
5479 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
5480 pci_set_drvdata(pdev, adapter);
5482 if (func != ent->driver_data) {
5483 pci_disable_device(pdev);
5484 pci_save_state(pdev); /* to restore SR-IOV later */
5488 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5490 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5492 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5493 "coherent allocations\n");
5494 goto out_free_adapter;
5497 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5499 dev_err(&pdev->dev, "no usable DMA configuration\n");
5500 goto out_free_adapter;
5504 pci_enable_pcie_error_reporting(pdev);
5505 pci_set_master(pdev);
5506 pci_save_state(pdev);
5508 adapter->workq = create_singlethread_workqueue("cxgb4");
5509 if (!adapter->workq) {
5511 goto out_free_adapter;
5514 /* PCI device has been enabled */
5515 adapter->flags |= DEV_ENABLED;
5516 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5518 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
5519 * Ingress Packet Data to Free List Buffers in order to allow for
5520 * chipset performance optimizations between the Root Complex and
5521 * Memory Controllers. (Messages to the associated Ingress Queue
5522 * notifying new Packet Placement in the Free Lists Buffers will be
5523 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
5524 * all preceding PCIe Transaction Layer Packets will be processed
5525 * first.) But some Root Complexes have various issues with Upstream
5526 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
5527 * PCIe devices under such Root Complexes will have the Relaxed
5528 * Ordering bit cleared in their configuration space, so we check our
5529 * PCIe configuration space to see if it's flagged with advice against
5530 * using Relaxed Ordering.
5532 if (!pcie_relaxed_ordering_enabled(pdev))
5533 adapter->flags |= ROOT_NO_RELAXED_ORDERING;
5535 spin_lock_init(&adapter->stats_lock);
5536 spin_lock_init(&adapter->tid_release_lock);
5537 spin_lock_init(&adapter->win0_lock);
5539 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5540 INIT_WORK(&adapter->db_full_task, process_db_full);
5541 INIT_WORK(&adapter->db_drop_task, process_db_drop);
5542 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
5544 err = t4_prep_adapter(adapter);
5546 goto out_free_adapter;
5548 if (is_kdump_kernel()) {
5549 /* Collect hardware state and append to /proc/vmcore */
5550 err = cxgb4_cudbg_vmcore_add_dump(adapter);
5552 dev_warn(adapter->pdev_dev,
5553 "Fail collecting vmcore device dump, err: %d. Continuing\n",
5559 if (!is_t4(adapter->params.chip)) {
5560 s_qpp = (QUEUESPERPAGEPF0_S +
5561 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
5563 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
5564 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
5565 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5567 /* Each segment size is 128B. Write coalescing is enabled only
5568 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
5569 * queue is at most the number of segments that fit in a page.
5572 if (qpp > num_seg) {
5574 "Incorrect number of egress queues per page\n");
5576 goto out_free_adapter;
5578 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5579 pci_resource_len(pdev, 2));
5580 if (!adapter->bar2) {
5581 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5583 goto out_free_adapter;
5587 setup_memwin(adapter);
5588 err = adap_init0(adapter);
5589 #ifdef CONFIG_DEBUG_FS
5590 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
5592 setup_memwin_rdma(adapter);
5596 /* configure SGE_STAT_CFG_A to read WC stats */
5597 if (!is_t4(adapter->params.chip))
5598 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
5599 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
5602 /* Initialize hash mac addr list */
5603 INIT_LIST_HEAD(&adapter->mac_hlist);
5605 for_each_port(adapter, i) {
5606 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5613 SET_NETDEV_DEV(netdev, &pdev->dev);
5615 adapter->port[i] = netdev;
5616 pi = netdev_priv(netdev);
5617 pi->adapter = adapter;
5618 pi->xact_addr_filt = -1;
5620 netdev->irq = pdev->irq;
5622 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5623 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5624 NETIF_F_RXCSUM | NETIF_F_RXHASH |
5625 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
5628 if (chip_ver > CHELSIO_T5) {
5629 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
5632 NETIF_F_GSO_UDP_TUNNEL |
5633 NETIF_F_TSO | NETIF_F_TSO6;
5635 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
5639 netdev->hw_features |= NETIF_F_HIGHDMA;
5640 netdev->features |= netdev->hw_features;
5641 netdev->vlan_features = netdev->features & VLAN_FEAT;
5643 netdev->priv_flags |= IFF_UNICAST_FLT;
5645 /* MTU range: 81 - 9600 */
5646 netdev->min_mtu = 81; /* accommodate SACK */
5647 netdev->max_mtu = MAX_MTU;
5649 netdev->netdev_ops = &cxgb4_netdev_ops;
5650 #ifdef CONFIG_CHELSIO_T4_DCB
5651 netdev->dcbnl_ops = &cxgb4_dcb_ops;
5652 cxgb4_dcb_state_init(netdev);
5653 cxgb4_dcb_version_init(netdev);
5655 cxgb4_set_ethtool_ops(netdev);
5658 cxgb4_init_ethtool_dump(adapter);
5660 pci_set_drvdata(pdev, adapter);
5662 if (adapter->flags & FW_OK) {
5663 err = t4_port_init(adapter, func, func, 0);
5666 } else if (adapter->params.nports == 1) {
5667 /* If we don't have a connection to the firmware -- possibly
5668 * because of an error -- grab the raw VPD parameters so we
5669 * can set the proper MAC Address on the debug network
5670 * interface that we've created.
5672 u8 hw_addr[ETH_ALEN];
5673 u8 *na = adapter->params.vpd.na;
5675 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
5677 for (i = 0; i < ETH_ALEN; i++)
5678 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
5679 hex2val(na[2 * i + 1]));
5680 t4_set_hw_addr(adapter, 0, hw_addr);
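/* The VPD "na" field stores the MAC address as ASCII hex, two
 * characters per byte; e.g. na = "000743000123" decodes to the
 * station address 00:07:43:00:01:23 (hypothetical example value).
 */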
5684 if (!(adapter->flags & FW_OK))
5685 goto fw_attach_fail;
5687 /* Configure queues and allocate tables now, they can be needed as
5688 * soon as the first register_netdev completes.
5690 err = cfg_queues(adapter);
5694 adapter->smt = t4_init_smt();
5695 if (!adapter->smt) {
5696 /* We tolerate a lack of SMT, giving up some functionality */
5697 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
5700 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
5701 if (!adapter->l2t) {
5702 /* We tolerate a lack of L2T, giving up some functionality */
5703 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5704 adapter->params.offload = 0;
5707 adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
5708 sizeof(struct mps_encap_entry),
5710 if (!adapter->mps_encap)
5711 dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
5713 #if IS_ENABLED(CONFIG_IPV6)
5714 if (chip_ver <= CHELSIO_T5 &&
5715 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
5716 /* CLIP functionality is not present in hardware,
5717 * hence disable all offload features
5719 dev_warn(&pdev->dev,
5720 "CLIP not enabled in hardware, continuing\n");
5721 adapter->params.offload = 0;
5723 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
5724 adapter->clipt_end);
5725 if (!adapter->clipt) {
5726 /* We tolerate a lack of clip_table, giving up
5727 * some functionality
5729 dev_warn(&pdev->dev,
5730 "could not allocate Clip table, continuing\n");
5731 adapter->params.offload = 0;
5736 for_each_port(adapter, i) {
5737 pi = adap2pinfo(adapter, i);
5738 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
5740 dev_warn(&pdev->dev,
5741 "could not activate scheduling on port %d\n",
5745 if (tid_init(&adapter->tids) < 0) {
5746 dev_warn(&pdev->dev, "could not allocate TID table, "
5748 adapter->params.offload = 0;
5750 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
5751 if (!adapter->tc_u32)
5752 dev_warn(&pdev->dev,
5753 "could not offload tc u32, continuing\n");
5755 if (cxgb4_init_tc_flower(adapter))
5756 dev_warn(&pdev->dev,
5757 "could not offload tc flower, continuing\n");
5760 if (is_offload(adapter) || is_hashfilter(adapter)) {
5761 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
5762 u32 hash_base, hash_reg;
5764 if (chip_ver <= CHELSIO_T5) {
5765 hash_reg = LE_DB_TID_HASHBASE_A;
5766 hash_base = t4_read_reg(adapter, hash_reg);
5767 adapter->tids.hash_base = hash_base / 4;
5769 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
5770 hash_base = t4_read_reg(adapter, hash_reg);
5771 adapter->tids.hash_base = hash_base;
5776 /* See what interrupts we'll be using */
5777 if (msi > 1 && enable_msix(adapter) == 0)
5778 adapter->flags |= USING_MSIX;
5779 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
5780 adapter->flags |= USING_MSI;
5782 free_msix_info(adapter);
5785 /* check for PCI Express bandwidth capabilities */
5786 pcie_print_link_status(pdev);
5788 err = init_rss(adapter);
5792 err = setup_fw_sge_queues(adapter);
5794 dev_err(adapter->pdev_dev,
5795 "FW sge queue allocation failed, err %d", err);
5801 * The card is now ready to go. If any errors occur during device
5802 * registration we do not fail the whole card but rather proceed only
5803 * with the ports we manage to register successfully. However we must
5804 * register at least one net device.
5806 for_each_port(adapter, i) {
5807 pi = adap2pinfo(adapter, i);
5808 adapter->port[i]->dev_port = pi->lport;
5809 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5810 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5812 netif_carrier_off(adapter->port[i]);
5814 err = register_netdev(adapter->port[i]);
5817 adapter->chan_map[pi->tx_chan] = i;
5818 print_port_info(adapter->port[i]);
5821 dev_err(&pdev->dev, "could not register any net devices\n");
5825 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5829 if (cxgb4_debugfs_root) {
5830 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5831 cxgb4_debugfs_root);
5832 setup_debugfs(adapter);
5835 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5836 pdev->needs_freset = 1;
5838 if (is_uld(adapter)) {
5839 mutex_lock(&uld_mutex);
5840 list_add_tail(&adapter->list_node, &adapter_list);
5841 mutex_unlock(&uld_mutex);
5844 if (!is_t4(adapter->params.chip))
5845 cxgb4_ptp_init(adapter);
5847 print_adapter_info(adapter);
5851 t4_free_sge_resources(adapter);
5852 free_some_resources(adapter);
5853 if (adapter->flags & USING_MSIX)
5854 free_msix_info(adapter);
5855 if (adapter->num_uld || adapter->num_ofld_uld)
5856 t4_uld_mem_free(adapter);
5858 if (!is_t4(adapter->params.chip))
5859 iounmap(adapter->bar2);
5862 destroy_workqueue(adapter->workq);
5864 kfree(adapter->mbox_log);
5869 pci_disable_pcie_error_reporting(pdev);
5870 pci_disable_device(pdev);
5871 out_release_regions:
5872 pci_release_regions(pdev);
5876 static void remove_one(struct pci_dev *pdev)
5878 struct adapter *adapter = pci_get_drvdata(pdev);
5879 struct hash_mac_addr *entry, *tmp;
5882 pci_release_regions(pdev);
5886 adapter->flags |= SHUTTING_DOWN;
5888 if (adapter->pf == 4) {
5891 /* Tear down per-adapter Work Queue first since it can contain
5892 * references to our adapter data structure.
5894 destroy_workqueue(adapter->workq);
5896 if (is_uld(adapter)) {
5897 detach_ulds(adapter);
5898 t4_uld_clean_up(adapter);
5901 adap_free_hma_mem(adapter);
5903 disable_interrupts(adapter);
5905 for_each_port(adapter, i)
5906 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5907 unregister_netdev(adapter->port[i]);
5909 debugfs_remove_recursive(adapter->debugfs_root);
5911 if (!is_t4(adapter->params.chip))
5912 cxgb4_ptp_stop(adapter);
5914 /* If we allocated filters, free up state associated with any valid filters. */
5917 clear_all_filters(adapter);
5919 if (adapter->flags & FULL_INIT_DONE)
5922 if (adapter->flags & USING_MSIX)
5923 free_msix_info(adapter);
5924 if (adapter->num_uld || adapter->num_ofld_uld)
5925 t4_uld_mem_free(adapter);
5926 free_some_resources(adapter);
5927 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
5929 list_del(&entry->list);
5933 #if IS_ENABLED(CONFIG_IPV6)
5934 t4_cleanup_clip_tbl(adapter);
5936 if (!is_t4(adapter->params.chip))
5937 iounmap(adapter->bar2);
5939 #ifdef CONFIG_PCI_IOV
5941 cxgb4_iov_configure(adapter->pdev, 0);
5944 iounmap(adapter->regs);
5945 pci_disable_pcie_error_reporting(pdev);
5946 if ((adapter->flags & DEV_ENABLED)) {
5947 pci_disable_device(pdev);
5948 adapter->flags &= ~DEV_ENABLED;
5950 pci_release_regions(pdev);
5951 kfree(adapter->mbox_log);
5956 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
5957 * delivery. This is essentially a stripped down version of the PCI remove()
5958 * function where we do the minimal amount of work necessary to shut down any ongoing work.
5961 static void shutdown_one(struct pci_dev *pdev)
5963 struct adapter *adapter = pci_get_drvdata(pdev);
5965 /* As with remove_one() above (see extended comment), we only want to
5966 * do cleanup on PCI Devices which went all the way through init_one()
5970 pci_release_regions(pdev);
5974 adapter->flags |= SHUTTING_DOWN;
5976 if (adapter->pf == 4) {
5979 for_each_port(adapter, i)
5980 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5981 cxgb_close(adapter->port[i]);
5983 if (is_uld(adapter)) {
5984 detach_ulds(adapter);
5985 t4_uld_clean_up(adapter);
5988 disable_interrupts(adapter);
5989 disable_msi(adapter);
5991 t4_sge_stop(adapter);
5992 if (adapter->flags & FW_OK)
5993 t4_fw_bye(adapter, adapter->mbox);
5997 static struct pci_driver cxgb4_driver = {
5998 .name = KBUILD_MODNAME,
5999 .id_table = cxgb4_pci_tbl,
6001 .remove = remove_one,
6002 .shutdown = shutdown_one,
6003 #ifdef CONFIG_PCI_IOV
6004 .sriov_configure = cxgb4_iov_configure,
6006 .err_handler = &cxgb4_eeh,
6009 static int __init cxgb4_init_module(void)
6013 /* Debugfs support is optional, just warn if this fails */
6014 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6015 if (!cxgb4_debugfs_root)
6016 pr_warn("could not create debugfs entry, continuing\n");
6018 ret = pci_register_driver(&cxgb4_driver);
6022 #if IS_ENABLED(CONFIG_IPV6)
6023 if (!inet6addr_registered) {
6024 ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6026 pci_unregister_driver(&cxgb4_driver);
6028 inet6addr_registered = true;
6036 debugfs_remove(cxgb4_debugfs_root);
6041 static void __exit cxgb4_cleanup_module(void)
6043 #if IS_ENABLED(CONFIG_IPV6)
6044 if (inet6addr_registered) {
6045 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6046 inet6addr_registered = false;
6049 pci_unregister_driver(&cxgb4_driver);
6050 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6053 module_init(cxgb4_init_module);
6054 module_exit(cxgb4_cleanup_module);