/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1Gbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

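/*
 * Let the MAC drop packets queued for transmission (F_ENDROPPKT) so its Tx
 * FIFO drains while the link is unusable, then bounce the Rx/Tx enables so
 * the datapath restarts cleanly.
 */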
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

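/*
 * OS-dependent handler for XGMAC link faults: bring the carrier state in
 * line with @state, clear any latched local-fault interrupt, and re-enable
 * or drain the Tx path as appropriate.
 */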
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                            pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter whose PHY reported the module change
 *      @port_id: the port index of the PHY reporting the module change
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

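/*
 * await_mgmt_replies() waits for replies to the management requests sent by
 * init_tp_parity(), which writes dummy entries to TP's parity-protected
 * memories (SMT, L2T, and the routing table), followed by a SET_TCB_FIELD,
 * so that every location is initialized before parity checking is enabled.
 */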
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = __skb_put_zero(skb, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

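/*
 * Ring the doorbell of every egress (Tx) context so the SGE re-examines the
 * state of each Tx queue.
 */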
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }
        return 0;
}

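/*
 * Plumbing for the sysfs attributes below: the show and store helpers take
 * the RTNL so they cannot race with ioctls that may shut the device down.
 */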
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (ret >= 0)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static const struct attribute_group cxgb3_attr_group = {
        .attrs = cxgb3_attrs,
};

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static const struct attribute_group offload_attr_group = {
        .attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

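/*
 * Issue a management work request that configures one of the SGE's packet
 * schedulers: @sched selects the scheduler, @qidx the queue, @lo and @hi the
 * scheduler's min/max parameters, and @port its binding.
 */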
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

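/*
 * Bind each queue set's Tx queues to their port using the packet-scheduler
 * management command above.
 */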
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "/*(DEBLOBBED)*/"
#define TPSRAM_NAME "/*(DEBLOBBED)*/"
#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"

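/*
 * Map an EDC program index to the name of the firmware file that carries it;
 * returns NULL for an unknown index.
 */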
static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, take checksum in account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4 ; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = reject_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

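/*
 * Map the T3 chip revision to the single character used to select the
 * matching protocol-SRAM image; returns 0 for revisions that need none.
 */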
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = reject_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      t3_synchronize_rx - wait for current Rx processing on a port to complete
 *      @adap: the adapter
 *      @p: the port
 *
 *      Ensures that current Rx processing on any of the queues associated with
 *      the given port completes before returning.  We do this by acquiring and
 *      releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

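/*
 * Apply the requested VLAN-tag-extraction setting.  Revisions newer than
 * rev 0 have a per-port control; T3A has a single control for all ports, so
 * there the setting is the OR of what every port wants.
 */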
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        quiesce_rx(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
                (HZ * adap->params.linkpoll_period) / 10 :
                adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

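/*
 * Bring up the offload side of the adapter: enable TP offload mode, activate
 * the offload device, program the MTU table, prime the SMT, and notify any
 * registered upper-layer clients.
 */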
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &dev->stats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
                pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
                pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
        if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int set_phys_id(struct net_device *dev,
                       enum ethtool_phys_id_state state)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_OFF:
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
                break;

        case ETHTOOL_ID_ON:
        case ETHTOOL_ID_INACTIVE:
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 F_GPIO0_OUT_VAL);
        }

        return 0;
}

static int get_link_ksettings(struct net_device *dev,
                              struct ethtool_link_ksettings *cmd)
{
        struct port_info *p = netdev_priv(dev);
        u32 supported;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                p->link_config.supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                p->link_config.advertising);

        if (netif_carrier_ok(dev)) {
                cmd->base.speed = p->link_config.speed;
                cmd->base.duplex = p->link_config.duplex;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }

        ethtool_convert_link_mode_to_legacy_u32(&supported,
                                                cmd->link_modes.supported);

        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->base.phy_address = p->phy.mdio.prtad;
        cmd->base.autoneg = p->link_config.autoneg;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                         ADVERTISED_10000baseT_Full)

static int set_link_ksettings(struct net_device *dev,
                              const struct ethtool_link_ksettings *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;
        u32 advertising;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        if (!(lc->supported & SUPPORTED_Autoneg)) {
                /*
                 * PHY offers a single speed/duplex.  See if that's what's
                 * being requested.
                 */
                if (cmd->base.autoneg == AUTONEG_DISABLE) {
                        u32 speed = cmd->base.speed;
                        int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
                        if (lc->supported & cap)
                                return 0;
                }
                return -EINVAL;
        }

        if (cmd->base.autoneg == AUTONEG_DISABLE) {
                u32 speed = cmd->base.speed;
                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

                if (!(lc->supported & cap) || (speed == SPEED_1000))
                        return -EINVAL;
                lc->requested_speed = speed;
                lc->requested_duplex = cmd->base.duplex;
                lc->advertising = 0;
        } else {
                advertising &= ADVERTISED_MASK;
                advertising &= lc->supported;
                if (!advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->base.autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q;
        int i;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *qsp;
        struct sge_qset *qs;
        int i;

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        for (i = 0; i < pi->nqsets; i++) {
                qsp = &adapter->params.sge.qset[i];
                qs = &adapter->sge.qs[i];
                qsp->coalesce_usecs = c->rx_coalesce_usecs;
                t3_update_qset_coalesce(qs, qsp);
        }

        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i, err = 0;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 aligned_offset, aligned_len;
        __le32 *p;
        u8 *buf;
        int err;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (__le32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .set_phys_id = set_phys_id,
        .nway_reset = restart_autoneg,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_link_ksettings = get_link_ksettings,
        .set_link_ksettings = set_link_ksettings,
};

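/*
 * The private ioctl below uses negative parameter values to mean "leave this
 * field unchanged", so in_range() treats any negative value as valid.
 */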
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}

static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 cmd;
        int ret;

        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;

        switch (cmd) {
        case CHELSIO_SET_QSET_PARAMS:{
                int i;
                struct qset_params *q;
                struct ch_qset_params t;
                int q1 = pi->first_qset;
                int nqsets = pi->nqsets;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if (t.cmd != CHELSIO_SET_QSET_PARAMS)
                        return -EINVAL;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;

                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;

                /* Allow setting of any available qset when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                nqsets += pi->first_qset + pi->nqsets;
                        }
                }

                if (t.qset_idx < q1)
                        return -EINVAL;
                if (t.qset_idx > q1 + nqsets - 1)
                        return -EINVAL;

                q = &adapter->params.sge.qset[t.qset_idx];

                if (t.rspq_size >= 0)
                        q->rspq_size = t.rspq_size;
                if (t.fl_size[0] >= 0)
                        q->fl_size = t.fl_size[0];
                if (t.fl_size[1] >= 0)
                        q->jumbo_size = t.fl_size[1];
                if (t.txq_size[0] >= 0)
                        q->txq_size[0] = t.txq_size[0];
                if (t.txq_size[1] >= 0)
                        q->txq_size[1] = t.txq_size[1];
                if (t.txq_size[2] >= 0)
                        q->txq_size[2] = t.txq_size[2];
                if (t.cong_thres >= 0)
                        q->cong_thres = t.cong_thres;
                if (t.intr_lat >= 0) {
                        struct sge_qset *qs =
                                &adapter->sge.qs[t.qset_idx];

                        q->coalesce_usecs = t.intr_lat;
                        t3_update_qset_coalesce(qs, q);
                }
                if (t.polling >= 0) {
                        if (adapter->flags & USING_MSIX)
                                q->polling = t.polling;
                        else {
                                /* No polling with INTx for T3A */
                                if (adapter->params.rev == 0 &&
                                    !(adapter->flags & USING_MSI))
                                        t.polling = 0;

                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.
                                                qset[i];
                                        q->polling = t.polling;
                                }
                        }
                }

                if (t.lro >= 0) {
                        if (t.lro)
                                dev->wanted_features |= NETIF_F_GRO;
                        else
                                dev->wanted_features &= ~NETIF_F_GRO;
                        netdev_update_features(dev);
                }

                break;
        }
        case CHELSIO_GET_QSET_PARAMS:{
                struct qset_params *q;
                struct ch_qset_params t;
                int q1 = pi->first_qset;
                int nqsets = pi->nqsets;
                int i;

                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;

                if (t.cmd != CHELSIO_GET_QSET_PARAMS)
                        return -EINVAL;

                /* Display qsets for all ports when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
                        for_each_port(adapter, i) {
                                pi = adap2pinfo(adapter, i);
                                nqsets = pi->first_qset + pi->nqsets;
                        }
                }

                if (t.qset_idx >= nqsets)
                        return -EINVAL;
                t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

                q = &adapter->params.sge.qset[q1 + t.qset_idx];
                t.rspq_size = q->rspq_size;
                t.txq_size[0] = q->txq_size[0];
                t.txq_size[1] = q->txq_size[1];
                t.txq_size[2] = q->txq_size[2];
                t.fl_size[0] = q->fl_size;
                t.fl_size[1] = q->jumbo_size;
                t.polling = q->polling;
                t.lro = !!(dev->features & NETIF_F_GRO);
                t.intr_lat = q->coalesce_usecs;
                t.cong_thres = q->cong_thres;
                t.qnum = q1;

                if (adapter->flags & USING_MSIX)
                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
                else
                        t.vector = adapter->pdev->irq;

                if (copy_to_user(useraddr, &t, sizeof(t)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_SET_QSET_NUM:{
                struct ch_reg edata;
                unsigned int i, first_qset = 0, other_qsets = 0;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (adapter->flags & FULL_INIT_DONE)
                        return -EBUSY;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
                if (edata.cmd != CHELSIO_SET_QSET_NUM)
                        return -EINVAL;
                if (edata.val < 1 ||
                    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;

                for_each_port(adapter, i)
                        if (adapter->port[i] && adapter->port[i] != dev)
                                other_qsets += adap2pinfo(adapter, i)->nqsets;

                if (edata.val + other_qsets > SGE_QSETS)
                        return -EINVAL;

                pi->nqsets = edata.val;

                for_each_port(adapter, i)
                        if (adapter->port[i]) {
                                pi = adap2pinfo(adapter, i);
                                pi->first_qset = first_qset;
                                first_qset += pi->nqsets;
                        }
                break;
        }
        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;

                memset(&edata, 0, sizeof(struct ch_reg));

                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_LOAD_FW:{
                u8 *fw_data;
                struct ch_mem_range t;

                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if (t.cmd != CHELSIO_LOAD_FW)
                        return -EINVAL;
                /* Check t.len sanity ? */
                fw_data = memdup_user(useraddr + sizeof(t), t.len);
                if (IS_ERR(fw_data))
                        return PTR_ERR(fw_data);

                ret = t3_load_fw(adapter, fw_data, t.len);
                kfree(fw_data);
                if (ret)
                        return ret;
                break;
        }
        case CHELSIO_SETMTUTAB:{
                struct ch_mtus m;
                int i;

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (offload_running(adapter))
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
                if (m.cmd != CHELSIO_SETMTUTAB)
                        return -EINVAL;
                if (m.nmtus != NMTUS)
                        return -EINVAL;
                if (m.mtus[0] < 81)     /* accommodate SACK */
                        return -EINVAL;

                /* MTUs must be in ascending order */
                for (i = 1; i < NMTUS; ++i)
                        if (m.mtus[i] < m.mtus[i - 1])
                                return -EINVAL;

                memcpy(adapter->params.mtus, m.mtus,
                       sizeof(adapter->params.mtus));
                break;
        }
2388 case CHELSIO_GET_PM:{
2389 struct tp_params *p = &adapter->params.tp;
2390 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2392 if (!is_offload(adapter))
2394 m.tx_pg_sz = p->tx_pg_size;
2395 m.tx_num_pg = p->tx_num_pgs;
2396 m.rx_pg_sz = p->rx_pg_size;
2397 m.rx_num_pg = p->rx_num_pgs;
2398 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2399 if (copy_to_user(useraddr, &m, sizeof(m)))
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SET_PM)
			return -EINVAL;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
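		/*
		 * The TX page size must likewise be one of the sizes in the
		 * 0x1554000 mask: 16KB, 64KB, 256KB, 1MB, 4MB or 16MB.
		 */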
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_GET_MEM)
			return -EINVAL;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					   buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
			return -EINVAL;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		cxgb_vlan_mode(dev, features);

	return 0;
}
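
/*
 * Poll-mode "interrupt": invoke the SGE interrupt handler directly for each
 * of this port's queue sets.  With MSI-X every qset has its own vector, so
 * the qset itself is the interrupt source; otherwise the whole adapter is.
 */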
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}

static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
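		/*
		 * A return of 1 means the watchdog recovered by toggling the
		 * MAC; 2 means the MAC is wedged and needs to be fully
		 * reinitialized below.
		 */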
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

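		/*
		 * Each queue set has two free lists, so consecutive status
		 * bits alternate between fl[0] and fl[1] of successive qsets.
		 */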
		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
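
/*
 * Doorbell-status workers: relay SGE doorbell FIFO full/empty/drop events to
 * the offload driver so it can throttle and later replay its queue doorbells.
 */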
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
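
/*
 * Quiesce the adapter on a fatal or PCI error: notify and close the offload
 * side, bring every running port down, stop the SGE timers and, if requested,
 * reset the chip before the PCI device is disabled.
 */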
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;
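
	/*
	 * One MSI-X vector is reserved for the slow path, so only
	 * msix_nvectors - 1 vectors are available to drive queue sets.
	 */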
	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

static int cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;
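
	/*
	 * Ask for up to one vector per queue set plus one, but accept no
	 * fewer than one per port plus one for the slow path.
	 */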
	vectors = pci_enable_msix_range(adap->pdev, entries,
					adap->params.nports + 1, vectors);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	adap->msix_nvectors = vectors;

	return 0;
}

static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_fix_features = cxgb_fix_features,
	.ndo_set_features = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

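	/*
	 * Derive a distinct iSCSI MAC from the LAN MAC by setting the high
	 * bit of the fourth octet.
	 */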
	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
			NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}
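
	/*
	 * Prefer a 64-bit DMA mask and fall back to 32 bits if the
	 * platform cannot provide it.
	 */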
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_release_regions;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter_nofail;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features |= netdev->hw_features |
				    NETIF_F_HW_VLAN_CTAG_TX;
		netdev->vlan_features |= netdev->features & VLAN_FEAT;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->ethtool_ops = &cxgb_ethtool_ops;
		netdev->min_mtu = 81;
		netdev->max_mtu = ETH_MAX_MTU;
		netdev->dev_port = pi->port_id;
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		err = -ENODEV;
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err) {
		dev_err(&pdev->dev, "cannot create sysfs group\n");
		goto out_close_led;
	}

	print_port_info(adapter, ai);
	return 0;

out_close_led:
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter_nofail:
	kfree_skb(adapter->nofail_skb);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);