/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
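
/*
 * A hypothetical usage example: loading the driver with
 * "modprobe cxgb3 msi=0" forces legacy pin (INTx) interrupts, while the
 * default msi=2 lets the driver try MSI-X first, then MSI, then INTx.
 */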

/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link status is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

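        /*
         * Slot 0 keeps the adapter name for the slow-path interrupt; the
         * data interrupts below are named "<ifname>-<queue set #>", e.g.
         * a hypothetical "eth0-0" for the first queue set of eth0.
         */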
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

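/*
 * Poll the queue-0 response queue's offload-packet counter until n new
 * replies have arrived.  Ten attempts with a 10 ms sleep bound the wait
 * at roughly 100 ms before giving up with -ETIMEDOUT.
 */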
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

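/*
 * Prime the TP's parity-protected memories by writing every entry once:
 * 16 SMT entries, 2048 L2T entries, and 2048 routing-table entries,
 * followed by one CPL_SET_TCB_FIELD whose reply confirms that all
 * 16 + 2048 + 2048 + 1 management messages have been processed.
 */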
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
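        /*
         * Worked example: with nq0 = 2 and nq1 = 2, the first half of
         * rspq_map alternates 0,1,0,1,... and the second half
         * 2,3,2,3,..., so each port's hash buckets spread over that
         * port's own queue sets.
         */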

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
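
/*
 * These attributes appear in each port's sysfs directory; e.g. (with a
 * hypothetical interface name) "echo 8192 > /sys/class/net/eth0/nfilters"
 * resizes the filter region.  Writes require CAP_NET_ADMIN and return
 * -EBUSY once the adapter has been fully initialized.
 */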

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
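                /*
                 * bpt bytes are sent every cpt core-clock ticks; with
                 * cclk in kHz, v = cclk * 1000 / cpt is periods per
                 * second, v * bpt is bytes per second, and dividing by
                 * 125 converts bytes/sec to kbits/sec.
                 */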
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
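
/*
 * Each schedX attribute reads back that hardware Tx scheduler's current
 * rate and, on write, hands the value to t3_config_sched(); e.g. (with a
 * hypothetical interface name) "echo 100000 > /sys/class/net/eth0/sched0"
 * requests roughly 100 Mbps from scheduler 0, assuming the value is in
 * Kbps as the read-back format suggests.
 */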

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

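/* Program the per-port MTU register: port 0 in bits 15:0, port 1 in 31:16. */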
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

/*(DEBLOBBED)*/
#define FW_FNAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
#define TPSRAM_NAME "/*(DEBLOBBED)*/"
#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, taking the trailing checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

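        /* Unpack each big-endian 32-bit word into two 16-bit entries of the
         * PHY's EDC program cache, high halfword first.
         */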
        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = reject_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = reject_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        quiesce_rx(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

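        /* linkpoll_period is in tenths of a second; when link polling is
         * disabled, fall back to the stats update period, which is in
         * seconds.
         */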
1336         timeo = adap->params.linkpoll_period ?
1337             (HZ * adap->params.linkpoll_period) / 10 :
1338             adap->params.stats_update_period * HZ;
1339         if (timeo)
1340                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1341 }
1342
1343 static int offload_open(struct net_device *dev)
1344 {
1345         struct port_info *pi = netdev_priv(dev);
1346         struct adapter *adapter = pi->adapter;
1347         struct t3cdev *tdev = dev2t3cdev(dev);
1348         int adap_up = adapter->open_device_map & PORT_MASK;
1349         int err;
1350
1351         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1352                 return 0;
1353
1354         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1355                 goto out;
1356
1357         t3_tp_set_offload_mode(adapter, 1);
1358         tdev->lldev = adapter->port[0];
1359         err = cxgb3_offload_activate(adapter);
1360         if (err)
1361                 goto out;
1362
1363         init_port_mtus(adapter);
1364         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1365                      adapter->params.b_wnd,
1366                      adapter->params.rev == 0 ?
1367                      adapter->port[0]->mtu : 0xffff);
1368         init_smt(adapter);
1369
1370         if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1371                 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1372
1373         /* Call back all registered clients */
1374         cxgb3_add_clients(tdev);
1375
1376 out:
1377         /* restore them in case the offload module has changed them */
1378         if (err) {
1379                 t3_tp_set_offload_mode(adapter, 0);
1380                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1381                 cxgb3_set_dummy_ops(tdev);
1382         }
1383         return err;
1384 }
1385
1386 static int offload_close(struct t3cdev *tdev)
1387 {
1388         struct adapter *adapter = tdev2adap(tdev);
1389         struct t3c_data *td = T3C_DATA(tdev);
1390
1391         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1392                 return 0;
1393
1394         /* Call back all registered clients */
1395         cxgb3_remove_clients(tdev);
1396
1397         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1398
1399         /* Flush work scheduled while releasing TIDs */
1400         flush_work(&td->tid_release_task);
1401
1402         tdev->lldev = NULL;
1403         cxgb3_set_dummy_ops(tdev);
1404         t3_tp_set_offload_mode(adapter, 0);
1405         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1406
1407         if (!adapter->open_device_map)
1408                 cxgb_down(adapter, 0);
1409
1410         cxgb3_offload_deactivate(adapter);
1411         return 0;
1412 }
1413
1414 static int cxgb_open(struct net_device *dev)
1415 {
1416         struct port_info *pi = netdev_priv(dev);
1417         struct adapter *adapter = pi->adapter;
1418         int other_ports = adapter->open_device_map & PORT_MASK;
1419         int err;
1420
1421         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1422                 return err;
1423
1424         set_bit(pi->port_id, &adapter->open_device_map);
1425         if (is_offload(adapter) && !ofld_disable) {
1426                 err = offload_open(dev);
1427                 if (err)
1428                         pr_warn("Could not initialize offload capabilities\n");
1429         }
1430
1431         netif_set_real_num_tx_queues(dev, pi->nqsets);
1432         err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1433         if (err)
1434                 return err;
1435         link_start(dev);
1436         t3_port_intr_enable(adapter, pi->port_id);
1437         netif_tx_start_all_queues(dev);
1438         if (!other_ports)
1439                 schedule_chk_task(adapter);
1440
1441         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1442         return 0;
1443 }
1444
1445 static int __cxgb_close(struct net_device *dev, int on_wq)
1446 {
1447         struct port_info *pi = netdev_priv(dev);
1448         struct adapter *adapter = pi->adapter;
1449
1450         
1451         if (!adapter->open_device_map)
1452                 return 0;
1453
1454         /* Stop link fault interrupts */
1455         t3_xgm_intr_disable(adapter, pi->port_id);
1456         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1457
1458         t3_port_intr_disable(adapter, pi->port_id);
1459         netif_tx_stop_all_queues(dev);
1460         pi->phy.ops->power_down(&pi->phy, 1);
1461         netif_carrier_off(dev);
1462         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1463
1464         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1465         clear_bit(pi->port_id, &adapter->open_device_map);
1466         spin_unlock_irq(&adapter->work_lock);
1467
1468         if (!(adapter->open_device_map & PORT_MASK))
1469                 cancel_delayed_work_sync(&adapter->adap_check_task);
1470
1471         if (!adapter->open_device_map)
1472                 cxgb_down(adapter, on_wq);
1473
1474         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1475         return 0;
1476 }
1477
1478 static int cxgb_close(struct net_device *dev)
1479 {
1480         return __cxgb_close(dev, 0);
1481 }
1482
1483 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1484 {
1485         struct port_info *pi = netdev_priv(dev);
1486         struct adapter *adapter = pi->adapter;
1487         struct net_device_stats *ns = &pi->netstats;
1488         const struct mac_stats *pstats;
1489
1490         spin_lock(&adapter->stats_lock);
1491         pstats = t3_mac_update_stats(&pi->mac);
1492         spin_unlock(&adapter->stats_lock);
1493
1494         ns->tx_bytes = pstats->tx_octets;
1495         ns->tx_packets = pstats->tx_frames;
1496         ns->rx_bytes = pstats->rx_octets;
1497         ns->rx_packets = pstats->rx_frames;
1498         ns->multicast = pstats->rx_mcast_frames;
1499
1500         ns->tx_errors = pstats->tx_underrun;
1501         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1502             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1503             pstats->rx_fifo_ovfl;
1504
1505         /* detailed rx_errors */
1506         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1507         ns->rx_over_errors = 0;
1508         ns->rx_crc_errors = pstats->rx_fcs_errs;
1509         ns->rx_frame_errors = pstats->rx_symbol_errs;
1510         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1511         ns->rx_missed_errors = pstats->rx_cong_drops;
1512
1513         /* detailed tx_errors */
1514         ns->tx_aborted_errors = 0;
1515         ns->tx_carrier_errors = 0;
1516         ns->tx_fifo_errors = pstats->tx_underrun;
1517         ns->tx_heartbeat_errors = 0;
1518         ns->tx_window_errors = 0;
1519         return ns;
1520 }
1521
1522 static u32 get_msglevel(struct net_device *dev)
1523 {
1524         struct port_info *pi = netdev_priv(dev);
1525         struct adapter *adapter = pi->adapter;
1526
1527         return adapter->msg_enable;
1528 }
1529
1530 static void set_msglevel(struct net_device *dev, u32 val)
1531 {
1532         struct port_info *pi = netdev_priv(dev);
1533         struct adapter *adapter = pi->adapter;
1534
1535         adapter->msg_enable = val;
1536 }
1537
1538 static const char stats_strings[][ETH_GSTRING_LEN] = {
1539         "TxOctetsOK         ",
1540         "TxFramesOK         ",
1541         "TxMulticastFramesOK",
1542         "TxBroadcastFramesOK",
1543         "TxPauseFrames      ",
1544         "TxUnderrun         ",
1545         "TxExtUnderrun      ",
1546
1547         "TxFrames64         ",
1548         "TxFrames65To127    ",
1549         "TxFrames128To255   ",
1550         "TxFrames256To511   ",
1551         "TxFrames512To1023  ",
1552         "TxFrames1024To1518 ",
1553         "TxFrames1519ToMax  ",
1554
1555         "RxOctetsOK         ",
1556         "RxFramesOK         ",
1557         "RxMulticastFramesOK",
1558         "RxBroadcastFramesOK",
1559         "RxPauseFrames      ",
1560         "RxFCSErrors        ",
1561         "RxSymbolErrors     ",
1562         "RxShortErrors      ",
1563         "RxJabberErrors     ",
1564         "RxLengthErrors     ",
1565         "RxFIFOoverflow     ",
1566
1567         "RxFrames64         ",
1568         "RxFrames65To127    ",
1569         "RxFrames128To255   ",
1570         "RxFrames256To511   ",
1571         "RxFrames512To1023  ",
1572         "RxFrames1024To1518 ",
1573         "RxFrames1519ToMax  ",
1574
1575         "PhyFIFOErrors      ",
1576         "TSO                ",
1577         "VLANextractions    ",
1578         "VLANinsertions     ",
1579         "TxCsumOffload      ",
1580         "RxCsumGood         ",
1581         "LroAggregated      ",
1582         "LroFlushed         ",
1583         "LroNoDesc          ",
1584         "RxDrops            ",
1585
1586         "CheckTXEnToggled   ",
1587         "CheckResets        ",
1588
1589         "LinkFaults         ",
1590 };
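/*
 * Editor's note: the ethtool core sizes the userspace stats buffer from
 * get_sset_count() below, so get_stats() must emit exactly
 * ARRAY_SIZE(stats_strings) u64 values, in the same order as this table.
 */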
1591
1592 static int get_sset_count(struct net_device *dev, int sset)
1593 {
1594         switch (sset) {
1595         case ETH_SS_STATS:
1596                 return ARRAY_SIZE(stats_strings);
1597         default:
1598                 return -EOPNOTSUPP;
1599         }
1600 }
1601
1602 #define T3_REGMAP_SIZE (3 * 1024)
1603
1604 static int get_regs_len(struct net_device *dev)
1605 {
1606         return T3_REGMAP_SIZE;
1607 }
1608
1609 static int get_eeprom_len(struct net_device *dev)
1610 {
1611         return EEPROMSIZE;
1612 }
1613
1614 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1615 {
1616         struct port_info *pi = netdev_priv(dev);
1617         struct adapter *adapter = pi->adapter;
1618         u32 fw_vers = 0;
1619         u32 tp_vers = 0;
1620
1621         spin_lock(&adapter->stats_lock);
1622         t3_get_fw_version(adapter, &fw_vers);
1623         t3_get_tp_version(adapter, &tp_vers);
1624         spin_unlock(&adapter->stats_lock);
1625
1626         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1627         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1628         strlcpy(info->bus_info, pci_name(adapter->pdev),
1629                 sizeof(info->bus_info));
1630         if (fw_vers)
1631                 snprintf(info->fw_version, sizeof(info->fw_version),
1632                          "%s %u.%u.%u TP %u.%u.%u",
1633                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1634                          G_FW_VERSION_MAJOR(fw_vers),
1635                          G_FW_VERSION_MINOR(fw_vers),
1636                          G_FW_VERSION_MICRO(fw_vers),
1637                          G_TP_VERSION_MAJOR(tp_vers),
1638                          G_TP_VERSION_MINOR(tp_vers),
1639                          G_TP_VERSION_MICRO(tp_vers));
1640 }
1641
1642 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1643 {
1644         if (stringset == ETH_SS_STATS)
1645                 memcpy(data, stats_strings, sizeof(stats_strings));
1646 }
1647
1648 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1649                                             struct port_info *p, int idx)
1650 {
1651         int i;
1652         unsigned long tot = 0;
1653
1654         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1655                 tot += adapter->sge.qs[i].port_stats[idx];
1656         return tot;
1657 }
1658
1659 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1660                       u64 *data)
1661 {
1662         struct port_info *pi = netdev_priv(dev);
1663         struct adapter *adapter = pi->adapter;
1664         const struct mac_stats *s;
1665
1666         spin_lock(&adapter->stats_lock);
1667         s = t3_mac_update_stats(&pi->mac);
1668         spin_unlock(&adapter->stats_lock);
1669
1670         *data++ = s->tx_octets;
1671         *data++ = s->tx_frames;
1672         *data++ = s->tx_mcast_frames;
1673         *data++ = s->tx_bcast_frames;
1674         *data++ = s->tx_pause;
1675         *data++ = s->tx_underrun;
1676         *data++ = s->tx_fifo_urun;
1677
1678         *data++ = s->tx_frames_64;
1679         *data++ = s->tx_frames_65_127;
1680         *data++ = s->tx_frames_128_255;
1681         *data++ = s->tx_frames_256_511;
1682         *data++ = s->tx_frames_512_1023;
1683         *data++ = s->tx_frames_1024_1518;
1684         *data++ = s->tx_frames_1519_max;
1685
1686         *data++ = s->rx_octets;
1687         *data++ = s->rx_frames;
1688         *data++ = s->rx_mcast_frames;
1689         *data++ = s->rx_bcast_frames;
1690         *data++ = s->rx_pause;
1691         *data++ = s->rx_fcs_errs;
1692         *data++ = s->rx_symbol_errs;
1693         *data++ = s->rx_short;
1694         *data++ = s->rx_jabber;
1695         *data++ = s->rx_too_long;
1696         *data++ = s->rx_fifo_ovfl;
1697
1698         *data++ = s->rx_frames_64;
1699         *data++ = s->rx_frames_65_127;
1700         *data++ = s->rx_frames_128_255;
1701         *data++ = s->rx_frames_256_511;
1702         *data++ = s->rx_frames_512_1023;
1703         *data++ = s->rx_frames_1024_1518;
1704         *data++ = s->rx_frames_1519_max;
1705
1706         *data++ = pi->phy.fifo_errors;
1707
1708         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1709         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1710         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1711         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1712         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1713         *data++ = 0;    /* LroAggregated (LRO superseded by GRO) */
1714         *data++ = 0;    /* LroFlushed */
1715         *data++ = 0;    /* LroNoDesc */
1716         *data++ = s->rx_cong_drops;
1717
1718         *data++ = s->num_toggled;
1719         *data++ = s->num_resets;
1720
1721         *data++ = s->link_faults;
1722 }
1723
1724 static inline void reg_block_dump(struct adapter *ap, void *buf,
1725                                   unsigned int start, unsigned int end)
1726 {
1727         u32 *p = buf + start;   /* each register is stored at its own offset */
1728
1729         for (; start <= end; start += sizeof(u32))
1730                 *p++ = t3_read_reg(ap, start);
1731 }
1732
1733 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1734                      void *buf)
1735 {
1736         struct port_info *pi = netdev_priv(dev);
1737         struct adapter *ap = pi->adapter;
1738
1739         /*
1740          * Version scheme:
1741          * bits 0..9: chip version
1742          * bits 10..15: chip revision
1743          * bit 31: set for PCIe cards
1744          */
1745         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1746
1747         /*
1748          * We skip the MAC statistics registers because they are clear-on-read.
1749          * Also reading multi-register stats would need to synchronize with the
1750          * periodic mac stats accumulation.  Hard to justify the complexity.
1751          */
1752         memset(buf, 0, T3_REGMAP_SIZE);
1753         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1754         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1755         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1756         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1757         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1758         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1759                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1760         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1761                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1762 }
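/*
 * Editor's note: a minimal sketch (not part of the driver) showing how a
 * consumer of the dump could unpack the version word composed above; the
 * field widths follow the "Version scheme" comment in get_regs().
 */
static inline void t3_example_decode_regs_version(u32 version)
{
	unsigned int chip = version & 0x3ff;		/* bits 0..9 */
	unsigned int rev = (version >> 10) & 0x3f;	/* bits 10..15 */
	bool pcie = version & (1u << 31);		/* bit 31 */

	pr_info("chip %u rev %u (%s)\n", chip, rev, pcie ? "PCIe" : "PCI-X");
}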
1763
1764 static int restart_autoneg(struct net_device *dev)
1765 {
1766         struct port_info *p = netdev_priv(dev);
1767
1768         if (!netif_running(dev))
1769                 return -EAGAIN;
1770         if (p->link_config.autoneg != AUTONEG_ENABLE)
1771                 return -EINVAL;
1772         p->phy.ops->autoneg_restart(&p->phy);
1773         return 0;
1774 }
1775
1776 static int set_phys_id(struct net_device *dev,
1777                        enum ethtool_phys_id_state state)
1778 {
1779         struct port_info *pi = netdev_priv(dev);
1780         struct adapter *adapter = pi->adapter;
1781
1782         switch (state) {
1783         case ETHTOOL_ID_ACTIVE:
1784                 return 1;       /* cycle on/off once per second */
1785
1786         case ETHTOOL_ID_OFF:
1787                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1788                 break;
1789
1790         case ETHTOOL_ID_ON:
1791         case ETHTOOL_ID_INACTIVE:
1792                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1793                          F_GPIO0_OUT_VAL);
1794         }
1795
1796         return 0;
1797 }
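/*
 * Editor's note: this backs "ethtool -p".  Returning 1 from
 * ETHTOOL_ID_ACTIVE asks the ethtool core to do the timing itself and
 * call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second; the
 * driver only has to toggle the LED, which here hangs off GPIO0.
 */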
1798
1799 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1800 {
1801         struct port_info *p = netdev_priv(dev);
1802
1803         cmd->supported = p->link_config.supported;
1804         cmd->advertising = p->link_config.advertising;
1805
1806         if (netif_carrier_ok(dev)) {
1807                 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1808                 cmd->duplex = p->link_config.duplex;
1809         } else {
1810                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1811                 cmd->duplex = DUPLEX_UNKNOWN;
1812         }
1813
1814         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1815         cmd->phy_address = p->phy.mdio.prtad;
1816         cmd->transceiver = XCVR_EXTERNAL;
1817         cmd->autoneg = p->link_config.autoneg;
1818         cmd->maxtxpkt = 0;
1819         cmd->maxrxpkt = 0;
1820         return 0;
1821 }
1822
1823 static int speed_duplex_to_caps(int speed, int duplex)
1824 {
1825         int cap = 0;
1826
1827         switch (speed) {
1828         case SPEED_10:
1829                 if (duplex == DUPLEX_FULL)
1830                         cap = SUPPORTED_10baseT_Full;
1831                 else
1832                         cap = SUPPORTED_10baseT_Half;
1833                 break;
1834         case SPEED_100:
1835                 if (duplex == DUPLEX_FULL)
1836                         cap = SUPPORTED_100baseT_Full;
1837                 else
1838                         cap = SUPPORTED_100baseT_Half;
1839                 break;
1840         case SPEED_1000:
1841                 if (duplex == DUPLEX_FULL)
1842                         cap = SUPPORTED_1000baseT_Full;
1843                 else
1844                         cap = SUPPORTED_1000baseT_Half;
1845                 break;
1846         case SPEED_10000:
1847                 if (duplex == DUPLEX_FULL)
1848                         cap = SUPPORTED_10000baseT_Full;
1849         }
1850         return cap;
1851 }
1852
1853 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1854                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1855                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1856                       ADVERTISED_10000baseT_Full)
1857
1858 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1859 {
1860         struct port_info *p = netdev_priv(dev);
1861         struct link_config *lc = &p->link_config;
1862
1863         if (!(lc->supported & SUPPORTED_Autoneg)) {
1864                 /*
1865                  * PHY offers a single speed/duplex.  See if that's what's
1866                  * being requested.
1867                  */
1868                 if (cmd->autoneg == AUTONEG_DISABLE) {
1869                         u32 speed = ethtool_cmd_speed(cmd);
1870                         int cap = speed_duplex_to_caps(speed, cmd->duplex);
1871                         if (lc->supported & cap)
1872                                 return 0;
1873                 }
1874                 return -EINVAL;
1875         }
1876
1877         if (cmd->autoneg == AUTONEG_DISABLE) {
1878                 u32 speed = ethtool_cmd_speed(cmd);
1879                 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1880
1881                 if (!(lc->supported & cap) || (speed == SPEED_1000))   /* 1G requires autoneg */
1882                         return -EINVAL;
1883                 lc->requested_speed = speed;
1884                 lc->requested_duplex = cmd->duplex;
1885                 lc->advertising = 0;
1886         } else {
1887                 cmd->advertising &= ADVERTISED_MASK;
1888                 cmd->advertising &= lc->supported;
1889                 if (!cmd->advertising)
1890                         return -EINVAL;
1891                 lc->requested_speed = SPEED_INVALID;
1892                 lc->requested_duplex = DUPLEX_INVALID;
1893                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1894         }
1895         lc->autoneg = cmd->autoneg;
1896         if (netif_running(dev))
1897                 t3_link_start(&p->phy, &p->mac, lc);
1898         return 0;
1899 }
1900
1901 static void get_pauseparam(struct net_device *dev,
1902                            struct ethtool_pauseparam *epause)
1903 {
1904         struct port_info *p = netdev_priv(dev);
1905
1906         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1907         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1908         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1909 }
1910
1911 static int set_pauseparam(struct net_device *dev,
1912                           struct ethtool_pauseparam *epause)
1913 {
1914         struct port_info *p = netdev_priv(dev);
1915         struct link_config *lc = &p->link_config;
1916
1917         if (epause->autoneg == AUTONEG_DISABLE)
1918                 lc->requested_fc = 0;
1919         else if (lc->supported & SUPPORTED_Autoneg)
1920                 lc->requested_fc = PAUSE_AUTONEG;
1921         else
1922                 return -EINVAL;
1923
1924         if (epause->rx_pause)
1925                 lc->requested_fc |= PAUSE_RX;
1926         if (epause->tx_pause)
1927                 lc->requested_fc |= PAUSE_TX;
1928         if (lc->autoneg == AUTONEG_ENABLE) {
1929                 if (netif_running(dev))
1930                         t3_link_start(&p->phy, &p->mac, lc);
1931         } else {
1932                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1933                 if (netif_running(dev))
1934                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1935         }
1936         return 0;
1937 }
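/*
 * Editor's note: the two branches above reflect how pause settings take
 * effect.  With autoneg, requested_fc is only advertised and the
 * renegotiation started by t3_link_start() decides the outcome; without
 * autoneg the setting is forced into the MAC immediately via
 * t3_mac_set_speed_duplex_fc(), -1 meaning "leave speed/duplex alone".
 */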
1938
1939 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1940 {
1941         struct port_info *pi = netdev_priv(dev);
1942         struct adapter *adapter = pi->adapter;
1943         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1944
1945         e->rx_max_pending = MAX_RX_BUFFERS;
1946         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1947         e->tx_max_pending = MAX_TXQ_ENTRIES;
1948
1949         e->rx_pending = q->fl_size;
1950         e->rx_mini_pending = q->rspq_size;
1951         e->rx_jumbo_pending = q->jumbo_size;
1952         e->tx_pending = q->txq_size[0];
1953 }
1954
1955 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1956 {
1957         struct port_info *pi = netdev_priv(dev);
1958         struct adapter *adapter = pi->adapter;
1959         struct qset_params *q;
1960         int i;
1961
1962         if (e->rx_pending > MAX_RX_BUFFERS ||
1963             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1964             e->tx_pending > MAX_TXQ_ENTRIES ||
1965             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1966             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1967             e->rx_pending < MIN_FL_ENTRIES ||
1968             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1969             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1970                 return -EINVAL;
1971
1972         if (adapter->flags & FULL_INIT_DONE)
1973                 return -EBUSY;
1974
1975         q = &adapter->params.sge.qset[pi->first_qset];
1976         for (i = 0; i < pi->nqsets; ++i, ++q) {
1977                 q->rspq_size = e->rx_mini_pending;
1978                 q->fl_size = e->rx_pending;
1979                 q->jumbo_size = e->rx_jumbo_pending;
1980                 q->txq_size[0] = e->tx_pending;
1981                 q->txq_size[1] = e->tx_pending;
1982                 q->txq_size[2] = e->tx_pending;
1983         }
1984         return 0;
1985 }
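/*
 * Editor's note: get_sge_param()/set_sge_param() back "ethtool -g/-G".
 * Because of the FULL_INIT_DONE test above, ring sizes can only be
 * changed before the adapter is first brought up; afterwards the call
 * fails with -EBUSY until the adapter is reset.
 */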
1986
1987 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1988 {
1989         struct port_info *pi = netdev_priv(dev);
1990         struct adapter *adapter = pi->adapter;
1991         struct qset_params *qsp;
1992         struct sge_qset *qs;
1993         int i;
1994
1995         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1996                 return -EINVAL;
1997
1998         for (i = 0; i < pi->nqsets; i++) {
1999                 qsp = &adapter->params.sge.qset[i];
2000                 qs = &adapter->sge.qs[i];
2001                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2002                 t3_update_qset_coalesce(qs, qsp);
2003         }
2004
2005         return 0;
2006 }
2007
2008 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2009 {
2010         struct port_info *pi = netdev_priv(dev);
2011         struct adapter *adapter = pi->adapter;
2012         struct qset_params *q = adapter->params.sge.qset;
2013
2014         c->rx_coalesce_usecs = q->coalesce_usecs;
2015         return 0;
2016 }
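/*
 * Editor's note: only rx-usecs of "ethtool -c/-C" is honoured here.  The
 * "* 10" bound check in set_coalesce() suggests the SGE NEWTIMER field
 * counts in 100ns units, so values above M_NEWTIMER/10 usecs cannot be
 * represented.
 */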
2017
2018 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2019                       u8 * data)
2020 {
2021         struct port_info *pi = netdev_priv(dev);
2022         struct adapter *adapter = pi->adapter;
2023         int i, err = 0;
2024
2025         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2026         if (!buf)
2027                 return -ENOMEM;
2028
2029         e->magic = EEPROM_MAGIC;
2030         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2031                 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
2032
2033         if (!err)
2034                 memcpy(data, buf + e->offset, e->len);
2035         kfree(buf);
2036         return err;
2037 }
2038
2039 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2040                       u8 * data)
2041 {
2042         struct port_info *pi = netdev_priv(dev);
2043         struct adapter *adapter = pi->adapter;
2044         u32 aligned_offset, aligned_len;
2045         __le32 *p;
2046         u8 *buf;
2047         int err;
2048
2049         if (eeprom->magic != EEPROM_MAGIC)
2050                 return -EINVAL;
2051
2052         aligned_offset = eeprom->offset & ~3;
2053         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2054
2055         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2056                 buf = kmalloc(aligned_len, GFP_KERNEL);
2057                 if (!buf)
2058                         return -ENOMEM;
2059                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2060                 if (!err && aligned_len > 4)
2061                         err = t3_seeprom_read(adapter,
2062                                               aligned_offset + aligned_len - 4,
2063                                               (__le32 *) & buf[aligned_len - 4]);
2064                 if (err)
2065                         goto out;
2066                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2067         } else
2068                 buf = data;
2069
2070         err = t3_seeprom_wp(adapter, 0);
2071         if (err)
2072                 goto out;
2073
2074         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2075                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2076                 aligned_offset += 4;
2077         }
2078
2079         if (!err)
2080                 err = t3_seeprom_wp(adapter, 1);
2081 out:
2082         if (buf != data)
2083                 kfree(buf);
2084         return err;
2085 }
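/*
 * Editor's note: set_eeprom() above is a word-aligned read-modify-write.
 * When the caller's offset or length is not a multiple of 4 it first
 * reads the partial words at either end of the range, merges in the new
 * bytes, and writes whole words back, so bytes outside the requested
 * range are preserved.  Write protection is dropped only for the
 * duration of the update.
 */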
2086
2087 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2088 {
2089         wol->supported = 0;
2090         wol->wolopts = 0;
2091         memset(&wol->sopass, 0, sizeof(wol->sopass));
2092 }
2093
2094 static const struct ethtool_ops cxgb_ethtool_ops = {
2095         .get_settings = get_settings,
2096         .set_settings = set_settings,
2097         .get_drvinfo = get_drvinfo,
2098         .get_msglevel = get_msglevel,
2099         .set_msglevel = set_msglevel,
2100         .get_ringparam = get_sge_param,
2101         .set_ringparam = set_sge_param,
2102         .get_coalesce = get_coalesce,
2103         .set_coalesce = set_coalesce,
2104         .get_eeprom_len = get_eeprom_len,
2105         .get_eeprom = get_eeprom,
2106         .set_eeprom = set_eeprom,
2107         .get_pauseparam = get_pauseparam,
2108         .set_pauseparam = set_pauseparam,
2109         .get_link = ethtool_op_get_link,
2110         .get_strings = get_strings,
2111         .set_phys_id = set_phys_id,
2112         .nway_reset = restart_autoneg,
2113         .get_sset_count = get_sset_count,
2114         .get_ethtool_stats = get_stats,
2115         .get_regs_len = get_regs_len,
2116         .get_regs = get_regs,
2117         .get_wol = get_wol,
2118 };
2119
2120 static int in_range(int val, int lo, int hi)
2121 {
2122         return val < 0 || (val <= hi && val >= lo);     /* val < 0 means "leave unchanged" */
2123 }
2124
2125 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2126 {
2127         struct port_info *pi = netdev_priv(dev);
2128         struct adapter *adapter = pi->adapter;
2129         u32 cmd;
2130         int ret;
2131
2132         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2133                 return -EFAULT;
2134
2135         switch (cmd) {
2136         case CHELSIO_SET_QSET_PARAMS:{
2137                 int i;
2138                 struct qset_params *q;
2139                 struct ch_qset_params t;
2140                 int q1 = pi->first_qset;
2141                 int nqsets = pi->nqsets;
2142
2143                 if (!capable(CAP_NET_ADMIN))
2144                         return -EPERM;
2145                 if (copy_from_user(&t, useraddr, sizeof(t)))
2146                         return -EFAULT;
2147                 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2148                         return -EINVAL;
2149                 if (t.qset_idx >= SGE_QSETS)
2150                         return -EINVAL;
2151                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2152                     !in_range(t.cong_thres, 0, 255) ||
2153                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2154                               MAX_TXQ_ENTRIES) ||
2155                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2156                               MAX_TXQ_ENTRIES) ||
2157                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2158                               MAX_CTRL_TXQ_ENTRIES) ||
2159                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2160                               MAX_RX_BUFFERS) ||
2161                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2162                               MAX_RX_JUMBO_BUFFERS) ||
2163                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2164                               MAX_RSPQ_ENTRIES))
2165                         return -EINVAL;
2166
2167                 if ((adapter->flags & FULL_INIT_DONE) &&
2168                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2169                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2170                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2171                         t.polling >= 0 || t.cong_thres >= 0))
2172                         return -EBUSY;
2173
2174                 /* Allow setting of any available qset when offload enabled */
2175                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2176                         q1 = 0;
2177                         for_each_port(adapter, i) {
2178                                 pi = adap2pinfo(adapter, i);
2179                                 nqsets += pi->first_qset + pi->nqsets;
2180                         }
2181                 }
2182
2183                 if (t.qset_idx < q1)
2184                         return -EINVAL;
2185                 if (t.qset_idx > q1 + nqsets - 1)
2186                         return -EINVAL;
2187
2188                 q = &adapter->params.sge.qset[t.qset_idx];
2189
2190                 if (t.rspq_size >= 0)
2191                         q->rspq_size = t.rspq_size;
2192                 if (t.fl_size[0] >= 0)
2193                         q->fl_size = t.fl_size[0];
2194                 if (t.fl_size[1] >= 0)
2195                         q->jumbo_size = t.fl_size[1];
2196                 if (t.txq_size[0] >= 0)
2197                         q->txq_size[0] = t.txq_size[0];
2198                 if (t.txq_size[1] >= 0)
2199                         q->txq_size[1] = t.txq_size[1];
2200                 if (t.txq_size[2] >= 0)
2201                         q->txq_size[2] = t.txq_size[2];
2202                 if (t.cong_thres >= 0)
2203                         q->cong_thres = t.cong_thres;
2204                 if (t.intr_lat >= 0) {
2205                         struct sge_qset *qs =
2206                                 &adapter->sge.qs[t.qset_idx];
2207
2208                         q->coalesce_usecs = t.intr_lat;
2209                         t3_update_qset_coalesce(qs, q);
2210                 }
2211                 if (t.polling >= 0) {
2212                         if (adapter->flags & USING_MSIX)
2213                                 q->polling = t.polling;
2214                         else {
2215                                 /* No polling with INTx for T3A */
2216                                 if (adapter->params.rev == 0 &&
2217                                         !(adapter->flags & USING_MSI))
2218                                         t.polling = 0;
2219
2220                                 for (i = 0; i < SGE_QSETS; i++) {
2221                                         q = &adapter->params.sge.
2222                                                 qset[i];
2223                                         q->polling = t.polling;
2224                                 }
2225                         }
2226                 }
2227
2228                 if (t.lro >= 0) {
2229                         if (t.lro)
2230                                 dev->wanted_features |= NETIF_F_GRO;
2231                         else
2232                                 dev->wanted_features &= ~NETIF_F_GRO;
2233                         netdev_update_features(dev);
2234                 }
2235
2236                 break;
2237         }
2238         case CHELSIO_GET_QSET_PARAMS:{
2239                 struct qset_params *q;
2240                 struct ch_qset_params t;
2241                 int q1 = pi->first_qset;
2242                 int nqsets = pi->nqsets;
2243                 int i;
2244
2245                 if (copy_from_user(&t, useraddr, sizeof(t)))
2246                         return -EFAULT;
2247
2248                 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2249                         return -EINVAL;
2250
2251                 /* Display qsets for all ports when offload enabled */
2252                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2253                         q1 = 0;
2254                         for_each_port(adapter, i) {
2255                                 pi = adap2pinfo(adapter, i);
2256                                 nqsets = pi->first_qset + pi->nqsets;
2257                         }
2258                 }
2259
2260                 if (t.qset_idx >= nqsets)
2261                         return -EINVAL;
2262                 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2263
2264                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2265                 t.rspq_size = q->rspq_size;
2266                 t.txq_size[0] = q->txq_size[0];
2267                 t.txq_size[1] = q->txq_size[1];
2268                 t.txq_size[2] = q->txq_size[2];
2269                 t.fl_size[0] = q->fl_size;
2270                 t.fl_size[1] = q->jumbo_size;
2271                 t.polling = q->polling;
2272                 t.lro = !!(dev->features & NETIF_F_GRO);
2273                 t.intr_lat = q->coalesce_usecs;
2274                 t.cong_thres = q->cong_thres;
2275                 t.qnum = q1;
2276
2277                 if (adapter->flags & USING_MSIX)
2278                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2279                 else
2280                         t.vector = adapter->pdev->irq;
2281
2282                 if (copy_to_user(useraddr, &t, sizeof(t)))
2283                         return -EFAULT;
2284                 break;
2285         }
2286         case CHELSIO_SET_QSET_NUM:{
2287                 struct ch_reg edata;
2288                 unsigned int i, first_qset = 0, other_qsets = 0;
2289
2290                 if (!capable(CAP_NET_ADMIN))
2291                         return -EPERM;
2292                 if (adapter->flags & FULL_INIT_DONE)
2293                         return -EBUSY;
2294                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2295                         return -EFAULT;
2296                 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2297                         return -EINVAL;
2298                 if (edata.val < 1 ||
2299                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2300                         return -EINVAL;
2301
2302                 for_each_port(adapter, i)
2303                         if (adapter->port[i] && adapter->port[i] != dev)
2304                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2305
2306                 if (edata.val + other_qsets > SGE_QSETS)
2307                         return -EINVAL;
2308
2309                 pi->nqsets = edata.val;
2310
2311                 for_each_port(adapter, i)
2312                         if (adapter->port[i]) {
2313                                 pi = adap2pinfo(adapter, i);
2314                                 pi->first_qset = first_qset;
2315                                 first_qset += pi->nqsets;
2316                         }
2317                 break;
2318         }
2319         case CHELSIO_GET_QSET_NUM:{
2320                 struct ch_reg edata;
2321
2322                 memset(&edata, 0, sizeof(struct ch_reg));
2323
2324                 edata.cmd = CHELSIO_GET_QSET_NUM;
2325                 edata.val = pi->nqsets;
2326                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2327                         return -EFAULT;
2328                 break;
2329         }
2330         case CHELSIO_LOAD_FW:{
2331                 u8 *fw_data;
2332                 struct ch_mem_range t;
2333
2334                 if (!capable(CAP_SYS_RAWIO))
2335                         return -EPERM;
2336                 if (copy_from_user(&t, useraddr, sizeof(t)))
2337                         return -EFAULT;
2338                 if (t.cmd != CHELSIO_LOAD_FW)
2339                         return -EINVAL;
2340                 /* Check t.len sanity? */
2341                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2342                 if (IS_ERR(fw_data))
2343                         return PTR_ERR(fw_data);
2344
2345                 ret = t3_load_fw(adapter, fw_data, t.len);
2346                 kfree(fw_data);
2347                 if (ret)
2348                         return ret;
2349                 break;
2350         }
2351         case CHELSIO_SETMTUTAB:{
2352                 struct ch_mtus m;
2353                 int i;
2354
2355                 if (!is_offload(adapter))
2356                         return -EOPNOTSUPP;
2357                 if (!capable(CAP_NET_ADMIN))
2358                         return -EPERM;
2359                 if (offload_running(adapter))
2360                         return -EBUSY;
2361                 if (copy_from_user(&m, useraddr, sizeof(m)))
2362                         return -EFAULT;
2363                 if (m.cmd != CHELSIO_SETMTUTAB)
2364                         return -EINVAL;
2365                 if (m.nmtus != NMTUS)
2366                         return -EINVAL;
2367                 if (m.mtus[0] < 81)     /* accommodate SACK */
2368                         return -EINVAL;
2369
2370                 /* MTUs must be in ascending order */
2371                 for (i = 1; i < NMTUS; ++i)
2372                         if (m.mtus[i] < m.mtus[i - 1])
2373                                 return -EINVAL;
2374
2375                 memcpy(adapter->params.mtus, m.mtus,
2376                         sizeof(adapter->params.mtus));
2377                 break;
2378         }
2379         case CHELSIO_GET_PM:{
2380                 struct tp_params *p = &adapter->params.tp;
2381                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2382
2383                 if (!is_offload(adapter))
2384                         return -EOPNOTSUPP;
2385                 m.tx_pg_sz = p->tx_pg_size;
2386                 m.tx_num_pg = p->tx_num_pgs;
2387                 m.rx_pg_sz = p->rx_pg_size;
2388                 m.rx_num_pg = p->rx_num_pgs;
2389                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2390                 if (copy_to_user(useraddr, &m, sizeof(m)))
2391                         return -EFAULT;
2392                 break;
2393         }
2394         case CHELSIO_SET_PM:{
2395                 struct ch_pm m;
2396                 struct tp_params *p = &adapter->params.tp;
2397
2398                 if (!is_offload(adapter))
2399                         return -EOPNOTSUPP;
2400                 if (!capable(CAP_NET_ADMIN))
2401                         return -EPERM;
2402                 if (adapter->flags & FULL_INIT_DONE)
2403                         return -EBUSY;
2404                 if (copy_from_user(&m, useraddr, sizeof(m)))
2405                         return -EFAULT;
2406                 if (m.cmd != CHELSIO_SET_PM)
2407                         return -EINVAL;
2408                 if (!is_power_of_2(m.rx_pg_sz) ||
2409                         !is_power_of_2(m.tx_pg_sz))
2410                         return -EINVAL; /* not power of 2 */
2411                 if (!(m.rx_pg_sz & 0x14000))
2412                         return -EINVAL; /* not 16KB or 64KB */
2413                 if (!(m.tx_pg_sz & 0x1554000))
2414                         return -EINVAL; /* not 16KB..16MB in power-of-4 steps */
2415                 if (m.tx_num_pg == -1)
2416                         m.tx_num_pg = p->tx_num_pgs;
2417                 if (m.rx_num_pg == -1)
2418                         m.rx_num_pg = p->rx_num_pgs;
2419                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2420                         return -EINVAL;
2421                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2422                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2423                         return -EINVAL;
2424                 p->rx_pg_size = m.rx_pg_sz;
2425                 p->tx_pg_size = m.tx_pg_sz;
2426                 p->rx_num_pgs = m.rx_num_pg;
2427                 p->tx_num_pgs = m.tx_num_pg;
2428                 break;
2429         }
2430         case CHELSIO_GET_MEM:{
2431                 struct ch_mem_range t;
2432                 struct mc7 *mem;
2433                 u64 buf[32];
2434
2435                 if (!is_offload(adapter))
2436                         return -EOPNOTSUPP;
2437                 if (!capable(CAP_NET_ADMIN))
2438                         return -EPERM;
2439                 if (!(adapter->flags & FULL_INIT_DONE))
2440                         return -EIO;    /* need the memory controllers */
2441                 if (copy_from_user(&t, useraddr, sizeof(t)))
2442                         return -EFAULT;
2443                 if (t.cmd != CHELSIO_GET_MEM)
2444                         return -EINVAL;
2445                 if ((t.addr & 7) || (t.len & 7))
2446                         return -EINVAL;
2447                 if (t.mem_id == MEM_CM)
2448                         mem = &adapter->cm;
2449                 else if (t.mem_id == MEM_PMRX)
2450                         mem = &adapter->pmrx;
2451                 else if (t.mem_id == MEM_PMTX)
2452                         mem = &adapter->pmtx;
2453                 else
2454                         return -EINVAL;
2455
2456                 /*
2457                  * Version scheme:
2458                  * bits 0..9: chip version
2459                  * bits 10..15: chip revision
2460                  */
2461                 t.version = 3 | (adapter->params.rev << 10);
2462                 if (copy_to_user(useraddr, &t, sizeof(t)))
2463                         return -EFAULT;
2464
2465                 /*
2466                  * Read 256 bytes at a time as len can be large and we don't
2467                  * want to use huge intermediate buffers.
2468                  */
2469                 useraddr += sizeof(t);  /* advance to start of buffer */
2470                 while (t.len) {
2471                         unsigned int chunk =
2472                                 min_t(unsigned int, t.len, sizeof(buf));
2473
2474                         ret =
2475                                 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2476                                                 buf);
2477                         if (ret)
2478                                 return ret;
2479                         if (copy_to_user(useraddr, buf, chunk))
2480                                 return -EFAULT;
2481                         useraddr += chunk;
2482                         t.addr += chunk;
2483                         t.len -= chunk;
2484                 }
2485                 break;
2486         }
2487         case CHELSIO_SET_TRACE_FILTER:{
2488                 struct ch_trace t;
2489                 const struct trace_params *tp;
2490
2491                 if (!capable(CAP_NET_ADMIN))
2492                         return -EPERM;
2493                 if (!offload_running(adapter))
2494                         return -EAGAIN;
2495                 if (copy_from_user(&t, useraddr, sizeof(t)))
2496                         return -EFAULT;
2497                 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2498                         return -EINVAL;
2499
2500                 tp = (const struct trace_params *)&t.sip;
2501                 if (t.config_tx)
2502                         t3_config_trace_filter(adapter, tp, 0,
2503                                                 t.invert_match,
2504                                                 t.trace_tx);
2505                 if (t.config_rx)
2506                         t3_config_trace_filter(adapter, tp, 1,
2507                                                 t.invert_match,
2508                                                 t.trace_rx);
2509                 break;
2510         }
2511         default:
2512                 return -EOPNOTSUPP;
2513         }
2514         return 0;
2515 }
2516
2517 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2518 {
2519         struct mii_ioctl_data *data = if_mii(req);
2520         struct port_info *pi = netdev_priv(dev);
2521         struct adapter *adapter = pi->adapter;
2522
2523         switch (cmd) {
2524         case SIOCGMIIREG:
2525         case SIOCSMIIREG:
2526                 /* Convert phy_id from older PRTAD/DEVAD format */
2527                 if (is_10G(adapter) &&
2528                     !mdio_phy_id_is_c45(data->phy_id) &&
2529                     (data->phy_id & 0x1f00) &&
2530                     !(data->phy_id & 0xe0e0))
2531                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2532                                                        data->phy_id & 0x1f);
2533                 /* FALLTHRU */
2534         case SIOCGMIIPHY:
2535                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2536         case SIOCCHIOCTL:
2537                 return cxgb_extension_ioctl(dev, req->ifr_data);
2538         default:
2539                 return -EOPNOTSUPP;
2540         }
2541 }
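/*
 * Editor's note: a hedged userspace sketch (not part of the driver) of
 * how the SIOCCHIOCTL extension path above is exercised.  The first u32
 * of the payload names the sub-command; "sock_fd" (any AF_INET socket)
 * and the interface name are placeholders.
 */
#if 0	/* illustration only, never compiled */
static void example_query_qset_num(int sock_fd)
{
	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&edata;

	if (ioctl(sock_fd, SIOCCHIOCTL, &ifr) == 0)
		printf("port has %u queue sets\n", edata.val);
}
#endif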
2542
2543 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2544 {
2545         struct port_info *pi = netdev_priv(dev);
2546         struct adapter *adapter = pi->adapter;
2547         int ret;
2548
2549         if (new_mtu < 81)       /* accommodate SACK */
2550                 return -EINVAL;
2551         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2552                 return ret;
2553         dev->mtu = new_mtu;
2554         init_port_mtus(adapter);
2555         if (adapter->params.rev == 0 && offload_running(adapter))
2556                 t3_load_mtus(adapter, adapter->params.mtus,
2557                              adapter->params.a_wnd, adapter->params.b_wnd,
2558                              adapter->port[0]->mtu);
2559         return 0;
2560 }
2561
2562 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2563 {
2564         struct port_info *pi = netdev_priv(dev);
2565         struct adapter *adapter = pi->adapter;
2566         struct sockaddr *addr = p;
2567
2568         if (!is_valid_ether_addr(addr->sa_data))
2569                 return -EADDRNOTAVAIL;
2570
2571         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2572         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2573         if (offload_running(adapter))
2574                 write_smt_entry(adapter, pi->port_id);
2575         return 0;
2576 }
2577
2578 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2579         netdev_features_t features)
2580 {
2581         /*
2582          * Since there is no support for separate rx/tx vlan accel
2583          * enable/disable make sure tx flag is always in same state as rx.
2584          */
2585         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2586                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2587         else
2588                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2589
2590         return features;
2591 }
2592
2593 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2594 {
2595         netdev_features_t changed = dev->features ^ features;
2596
2597         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2598                 cxgb_vlan_mode(dev, features);
2599
2600         return 0;
2601 }
2602
2603 #ifdef CONFIG_NET_POLL_CONTROLLER
2604 static void cxgb_netpoll(struct net_device *dev)
2605 {
2606         struct port_info *pi = netdev_priv(dev);
2607         struct adapter *adapter = pi->adapter;
2608         int qidx;
2609
2610         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2611                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2612                 void *source;
2613
2614                 if (adapter->flags & USING_MSIX)
2615                         source = qs;
2616                 else
2617                         source = adapter;
2618
2619                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2620         }
2621 }
2622 #endif
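/*
 * Editor's note: netpoll re-uses the normal interrupt path.
 * t3_intr_handler() picks the same handler request_irq() would have
 * registered, so the cookie must match: the qset itself under MSI-X
 * (one vector per qset), the whole adapter for shared INTx/MSI.
 */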
2623
2624 /*
2625  * Periodic accumulation of MAC statistics.
2626  */
2627 static void mac_stats_update(struct adapter *adapter)
2628 {
2629         int i;
2630
2631         for_each_port(adapter, i) {
2632                 struct net_device *dev = adapter->port[i];
2633                 struct port_info *p = netdev_priv(dev);
2634
2635                 if (netif_running(dev)) {
2636                         spin_lock(&adapter->stats_lock);
2637                         t3_mac_update_stats(&p->mac);
2638                         spin_unlock(&adapter->stats_lock);
2639                 }
2640         }
2641 }
2642
2643 static void check_link_status(struct adapter *adapter)
2644 {
2645         int i;
2646
2647         for_each_port(adapter, i) {
2648                 struct net_device *dev = adapter->port[i];
2649                 struct port_info *p = netdev_priv(dev);
2650                 int link_fault;
2651
2652                 spin_lock_irq(&adapter->work_lock);
2653                 link_fault = p->link_fault;
2654                 spin_unlock_irq(&adapter->work_lock);
2655
2656                 if (link_fault) {
2657                         t3_link_fault(adapter, i);
2658                         continue;
2659                 }
2660
2661                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2662                         t3_xgm_intr_disable(adapter, i);
2663                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2664
2665                         t3_link_changed(adapter, i);
2666                         t3_xgm_intr_enable(adapter, i);
2667                 }
2668         }
2669 }
2670
2671 static void check_t3b2_mac(struct adapter *adapter)
2672 {
2673         int i;
2674
2675         if (!rtnl_trylock())    /* synchronize with ifdown */
2676                 return;
2677
2678         for_each_port(adapter, i) {
2679                 struct net_device *dev = adapter->port[i];
2680                 struct port_info *p = netdev_priv(dev);
2681                 int status;
2682
2683                 if (!netif_running(dev))
2684                         continue;
2685
2686                 status = 0;
2687                 if (netif_running(dev) && netif_carrier_ok(dev))
2688                         status = t3b2_mac_watchdog_task(&p->mac);
2689                 if (status == 1)
2690                         p->mac.stats.num_toggled++;
2691                 else if (status == 2) {
2692                         struct cmac *mac = &p->mac;
2693
2694                         t3_mac_set_mtu(mac, dev->mtu);
2695                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2696                         cxgb_set_rxmode(dev);
2697                         t3_link_start(&p->phy, mac, &p->link_config);
2698                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2699                         t3_port_intr_enable(adapter, p->port_id);
2700                         p->mac.stats.num_resets++;
2701                 }
2702         }
2703         rtnl_unlock();
2704 }
2705
2706
2707 static void t3_adap_check_task(struct work_struct *work)
2708 {
2709         struct adapter *adapter = container_of(work, struct adapter,
2710                                                adap_check_task.work);
2711         const struct adapter_params *p = &adapter->params;
2712         int port;
2713         unsigned int v, status, reset;
2714
2715         adapter->check_task_cnt++;
2716
2717         check_link_status(adapter);
2718
2719         /* Accumulate MAC stats if needed */
2720         if (!p->linkpoll_period ||
2721             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2722             p->stats_update_period) {
2723                 mac_stats_update(adapter);
2724                 adapter->check_task_cnt = 0;
2725         }
2726
2727         if (p->rev == T3_REV_B2)
2728                 check_t3b2_mac(adapter);
2729
2730         /*
2731  * Scan the XGMACs to check for various conditions which we want to
2732          * monitor in a periodic polling manner rather than via an interrupt
2733          * condition.  This is used for conditions which would otherwise flood
2734          * the system with interrupts and we only really need to know that the
2735          * conditions are "happening" ...  For each condition we count the
2736          * detection of the condition and reset it for the next polling loop.
2737          */
2738         for_each_port(adapter, port) {
2739                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2740                 u32 cause;
2741
2742                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2743                 reset = 0;
2744                 if (cause & F_RXFIFO_OVERFLOW) {
2745                         mac->stats.rx_fifo_ovfl++;
2746                         reset |= F_RXFIFO_OVERFLOW;
2747                 }
2748
2749                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2750         }
2751
2752         /*
2753          * We do the same as above for FL_EMPTY interrupts.
2754          */
2755         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2756         reset = 0;
2757
2758         if (status & F_FLEMPTY) {
2759                 struct sge_qset *qs = &adapter->sge.qs[0];
2760                 int i = 0;
2761
2762                 reset |= F_FLEMPTY;
2763
2764                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2765                     0xffff;
2766
2767                 while (v) {     /* successive bits: FL0, FL1 of each qset */
2768                         qs->fl[i].empty += (v & 1);
2769                         if (i)
2770                                 qs++;   /* finished both FLs of this qset */
2771                         i ^= 1;
2772                         v >>= 1;
2773                 }
2774         }
2775
2776         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2777
2778         /* Schedule the next check update if any port is active. */
2779         spin_lock_irq(&adapter->work_lock);
2780         if (adapter->open_device_map & PORT_MASK)
2781                 schedule_chk_task(adapter);
2782         spin_unlock_irq(&adapter->work_lock);
2783 }
2784
2785 static void db_full_task(struct work_struct *work)
2786 {
2787         struct adapter *adapter = container_of(work, struct adapter,
2788                                                db_full_task);
2789
2790         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2791 }
2792
2793 static void db_empty_task(struct work_struct *work)
2794 {
2795         struct adapter *adapter = container_of(work, struct adapter,
2796                                                db_empty_task);
2797
2798         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2799 }
2800
2801 static void db_drop_task(struct work_struct *work)
2802 {
2803         struct adapter *adapter = container_of(work, struct adapter,
2804                                                db_drop_task);
2805         unsigned long delay = 1000;
2806         unsigned short r;
2807
2808         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2809
2810         /*
2811          * Sleep a while before ringing the driver qset dbs.
2812  * The delay is between 1000 and 2023 usecs.
2813          */
2814         get_random_bytes(&r, 2);
2815         delay += r & 1023;
2816         set_current_state(TASK_UNINTERRUPTIBLE);
2817         schedule_timeout(usecs_to_jiffies(delay));
2818         ring_dbs(adapter);
2819 }
2820
2821 /*
2822  * Processes external (PHY) interrupts in process context.
2823  */
2824 static void ext_intr_task(struct work_struct *work)
2825 {
2826         struct adapter *adapter = container_of(work, struct adapter,
2827                                                ext_intr_handler_task);
2828         int i;
2829
2830         /* Disable link fault interrupts */
2831         for_each_port(adapter, i) {
2832                 struct net_device *dev = adapter->port[i];
2833                 struct port_info *p = netdev_priv(dev);
2834
2835                 t3_xgm_intr_disable(adapter, i);
2836                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2837         }
2838
2839         /* Re-enable link fault interrupts */
2840         t3_phy_intr_handler(adapter);
2841
2842         for_each_port(adapter, i)
2843                 t3_xgm_intr_enable(adapter, i);
2844
2845         /* Now reenable external interrupts */
2846         spin_lock_irq(&adapter->work_lock);
2847         if (adapter->slow_intr_mask) {
2848                 adapter->slow_intr_mask |= F_T3DBG;
2849                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2850                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2851                              adapter->slow_intr_mask);
2852         }
2853         spin_unlock_irq(&adapter->work_lock);
2854 }
2855
2856 /*
2857  * Interrupt-context handler for external (PHY) interrupts.
2858  */
2859 void t3_os_ext_intr_handler(struct adapter *adapter)
2860 {
2861         /*
2862          * Schedule a task to handle external interrupts as they may be slow
2863          * and we use a mutex to protect MDIO registers.  We disable PHY
2864          * interrupts in the meantime and let the task reenable them when
2865          * it's done.
2866          */
2867         spin_lock(&adapter->work_lock);
2868         if (adapter->slow_intr_mask) {
2869                 adapter->slow_intr_mask &= ~F_T3DBG;
2870                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2871                              adapter->slow_intr_mask);
2872                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2873         }
2874         spin_unlock(&adapter->work_lock);
2875 }
2876
2877 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2878 {
2879         struct net_device *netdev = adapter->port[port_id];
2880         struct port_info *pi = netdev_priv(netdev);
2881
2882         spin_lock(&adapter->work_lock);
2883         pi->link_fault = 1;
2884         spin_unlock(&adapter->work_lock);
2885 }
2886
2887 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2888 {
2889         int i, ret = 0;
2890
2891         if (is_offload(adapter) &&
2892             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2893                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2894                 offload_close(&adapter->tdev);
2895         }
2896
2897         /* Stop all ports */
2898         for_each_port(adapter, i) {
2899                 struct net_device *netdev = adapter->port[i];
2900
2901                 if (netif_running(netdev))
2902                         __cxgb_close(netdev, on_wq);
2903         }
2904
2905         /* Stop SGE timers */
2906         t3_stop_sge_timers(adapter);
2907
2908         adapter->flags &= ~FULL_INIT_DONE;
2909
2910         if (reset)
2911                 ret = t3_reset_adapter(adapter);
2912
2913         pci_disable_device(adapter->pdev);
2914
2915         return ret;
2916 }
2917
2918 static int t3_reenable_adapter(struct adapter *adapter)
2919 {
2920         if (pci_enable_device(adapter->pdev)) {
2921                 dev_err(&adapter->pdev->dev,
2922                         "Cannot re-enable PCI device after reset.\n");
2923                 goto err;
2924         }
2925         pci_set_master(adapter->pdev);
2926         pci_restore_state(adapter->pdev);
2927         pci_save_state(adapter->pdev);
2928
2929         /* Free sge resources */
2930         t3_free_sge_resources(adapter);
2931
2932         if (t3_replay_prep_adapter(adapter))
2933                 goto err;
2934
2935         return 0;
2936 err:
2937         return -1;
2938 }
2939
static void t3_resume_ports(struct adapter *adapter)
{
        int i;

        /* Restart the ports */
        for_each_port(adapter, i) {
                struct net_device *netdev = adapter->port[i];

                if (netif_running(netdev)) {
                        if (cxgb_open(netdev)) {
                                dev_err(&adapter->pdev->dev,
                                        "can't bring device back up after reset\n");
                                continue;
                        }
                }
        }

        if (is_offload(adapter) && !ofld_disable)
                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Process a fatal error: bring the ports down, reset the chip, then bring
 * the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               fatal_error_handler_task);
        int err = 0;

        rtnl_lock();
        err = t3_adapter_error(adapter, 1, 1);
        if (!err)
                err = t3_reenable_adapter(adapter);
        if (!err)
                t3_resume_ports(adapter);

        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
        rtnl_unlock();
}

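/*
 * Handle a fatal hardware error: stop the SGE and both MAC TX/RX paths,
 * disable interrupts, and schedule the reset task.  Also dump the
 * firmware status words if they can still be read.
 */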
void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop(adapter);
                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

                spin_lock(&adapter->work_lock);
                t3_intr_disable(adapter);
                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
                spin_unlock(&adapter->work_lock);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        t3_adapter_error(adapter, 0, 0);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (!t3_reenable_adapter(adapter))
                return PCI_ERS_RESULT_RECOVERED;

        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
                 t3_read_reg(adapter, A_PCIE_PEX_ERR));

        rtnl_lock();
        t3_resume_ports(adapter);
        rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
        .error_detected = t3_io_error_detected,
        .slot_reset = t3_io_slot_reset,
        .resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
        int i, j = 0;
        int num_cpus = netif_get_num_default_rss_queues();
        int hwports = adap->params.nports;
        int nqsets = adap->msix_nvectors - 1;

        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
                if (hwports == 2 &&
                    (hwports * nqsets > SGE_QSETS ||
                     num_cpus >= nqsets / hwports))
                        nqsets /= hwports;
                if (nqsets > num_cpus)
                        nqsets = num_cpus;
                if (nqsets < 1 || hwports == 4)
                        nqsets = 1;
        } else {
                nqsets = 1;
        }

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = j;
                pi->nqsets = nqsets;
                j = pi->first_qset + nqsets;

                dev_info(&adap->pdev->dev,
                         "Port %d using %d queue sets.\n", i, nqsets);
        }
}

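/*
 * Try to allocate MSI-X vectors: ideally one per queue set plus one for
 * slow-path events, but accept as few as one per port plus one.  The
 * assigned vectors are recorded in the adapter's MSI-X table.
 */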
static int cxgb_enable_msix(struct adapter *adap)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int vectors;
        int i;

        vectors = ARRAY_SIZE(entries);
        for (i = 0; i < vectors; ++i)
                entries[i].entry = i;

        vectors = pci_enable_msix_range(adap->pdev, entries,
                                        adap->params.nports + 1, vectors);
        if (vectors < 0)
                return vectors;

        for (i = 0; i < vectors; ++i)
                adap->msix_info[i].vec = entries[i].vector;
        adap->msix_nvectors = vectors;

        return 0;
}

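/*
 * Print a one-line summary for each registered port (board type, PHY,
 * bus parameters, interrupt mode) and, once per adapter, the memory
 * sizes and serial number.
 */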
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
                            ai->desc, pi->phy.desc,
                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
                            (adap->flags & USING_MSIX) ? " MSI-X" :
                            (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                                adap->name, t3_mc7_size(&adap->cm) >> 20,
                                t3_mc7_size(&adap->pmtx) >> 20,
                                t3_mc7_size(&adap->pmrx) >> 20,
                                adap->params.vpd.sn);
        }
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t3_eth_xmit,
        .ndo_get_stats          = cxgb_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = cxgb_set_rxmode,
        .ndo_do_ioctl           = cxgb_ioctl,
        .ndo_change_mtu         = cxgb_change_mtu,
        .ndo_set_mac_address    = cxgb_set_mac_addr,
        .ndo_fix_features       = cxgb_fix_features,
        .ndo_set_features       = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb_netpoll,
#endif
};

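/*
 * Derive the port's iSCSI MAC address from its Ethernet address by
 * setting the top bit of the fourth byte, keeping the two distinct.
 */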
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
        pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i, err, pci_using_dac = 0;
        resource_size_t mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        pr_err("cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                goto out_disable_device;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
                        goto out_release_regions;
                }
        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_release_regions;
        }

        pci_set_master(pdev);
        pci_save_state(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_release_regions;
        }

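        /*
         * Pre-allocate an skb large enough for a CPL_SET_TCB_FIELD message
         * and hold it in reserve so a control message can still be sent if
         * an allocation fails later on.
         */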
        adapter->nofail_skb =
                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
        if (!adapter->nofail_skb) {
                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter_nofail;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

        INIT_WORK(&adapter->db_full_task, db_full_task);
        INIT_WORK(&adapter->db_empty_task, db_empty_task);
        INIT_WORK(&adapter->db_drop_task, db_drop_task);

        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

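        /* Allocate and initialize one net_device for each physical port. */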
        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
                netdev->features |= netdev->hw_features |
                                    NETIF_F_HW_VLAN_CTAG_TX;
                netdev->vlan_features |= netdev->features & VLAN_FEAT;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->netdev_ops = &cxgb_netdev_ops;
                netdev->ethtool_ops = &cxgb_ethtool_ops;
        }

        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err) {
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                } else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        for_each_port(adapter, i)
                cxgb3_init_iscsi_mac(adapter->port[i]);

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);

        print_port_info(adapter, ai);
        return 0;

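/* Error unwind: release resources in the reverse order of acquisition. */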
out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter_nofail:
        kfree_skb(adapter->nofail_skb);

out_free_adapter:
        kfree(adapter);

out_release_regions:
        pci_release_regions(pdev);
out_disable_device:
        pci_disable_device(pdev);
out:
        return err;
}

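/*
 * PCI remove callback: tear down everything init_one() set up, in reverse
 * order -- sysfs attributes, offload state, net devices, SGE resources,
 * interrupts, MMIO mapping and PCI state.
 */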
static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree_skb(adapter->nofail_skb);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = remove_one,
        .err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);