GNU Linux-libre 4.19.286-gnu1: drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/init.h>
38 #include <linux/pci.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/if_vlan.h>
43 #include <linux/mdio.h>
44 #include <linux/sockios.h>
45 #include <linux/workqueue.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rtnetlink.h>
48 #include <linux/firmware.h>
49 #include <linux/log2.h>
50 #include <linux/stringify.h>
51 #include <linux/sched.h>
52 #include <linux/slab.h>
53 #include <linux/uaccess.h>
54 #include <linux/nospec.h>
55
56 #include "common.h"
57 #include "cxgb3_ioctl.h"
58 #include "regs.h"
59 #include "cxgb3_offload.h"
60 #include "version.h"
61
62 #include "cxgb3_ctl_defs.h"
63 #include "t3_cpl.h"
64 #include "firmware_exports.h"
65
66 enum {
67         MAX_TXQ_ENTRIES = 16384,
68         MAX_CTRL_TXQ_ENTRIES = 1024,
69         MAX_RSPQ_ENTRIES = 16384,
70         MAX_RX_BUFFERS = 16384,
71         MAX_RX_JUMBO_BUFFERS = 16384,
72         MIN_TXQ_ENTRIES = 4,
73         MIN_CTRL_TXQ_ENTRIES = 4,
74         MIN_RSPQ_ENTRIES = 32,
75         MIN_FL_ENTRIES = 32
76 };
77
78 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
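
/*
 * PORT_MASK covers the bits of adapter->open_device_map that track open
 * ports, one bit per port: cxgb_open() sets a port's bit and
 * __cxgb_close() clears it.  OFFLOAD_DEVMAP_BIT, managed by
 * offload_open()/offload_close(), lies above this mask, so a test such as
 * "adapter->open_device_map & PORT_MASK" asks whether any port is up
 * independently of the offload state.
 */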
79
80 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
81                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
82                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
83
84 #define EEPROM_MAGIC 0x38E2F10C
85
86 #define CH_DEVICE(devid, idx) \
87         { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
88
89 static const struct pci_device_id cxgb3_pci_tbl[] = {
90         CH_DEVICE(0x20, 0),     /* PE9000 */
91         CH_DEVICE(0x21, 1),     /* T302E */
92         CH_DEVICE(0x22, 2),     /* T310E */
93         CH_DEVICE(0x23, 3),     /* T320X */
94         CH_DEVICE(0x24, 1),     /* T302X */
95         CH_DEVICE(0x25, 3),     /* T320E */
96         CH_DEVICE(0x26, 2),     /* T310X */
97         CH_DEVICE(0x30, 2),     /* T3B10 */
98         CH_DEVICE(0x31, 3),     /* T3B20 */
99         CH_DEVICE(0x32, 1),     /* T3B02 */
100         CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
101         CH_DEVICE(0x36, 3),     /* S320E-CR */
102         CH_DEVICE(0x37, 7),     /* N320E-G2 */
103         {0,}
104 };
105
106 MODULE_DESCRIPTION(DRV_DESC);
107 MODULE_AUTHOR("Chelsio Communications");
108 MODULE_LICENSE("Dual BSD/GPL");
109 MODULE_VERSION(DRV_VERSION);
110 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
111
112 static int dflt_msg_enable = DFLT_MSG_ENABLE;
113
114 module_param(dflt_msg_enable, int, 0644);
115 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
116
117 /*
118  * The driver uses the best interrupt scheme available on a platform in the
119  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
120  * of these schemes the driver may consider as follows:
121  *
122  * msi = 2: choose from among all three options
123  * msi = 1: only consider MSI and pin interrupts
124  * msi = 0: force pin interrupts
125  */
126 static int msi = 2;
127
128 module_param(msi, int, 0644);
129 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
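
/*
 * For example (standard module-parameter usage, not specific to this
 * driver), the interrupt scheme can be restricted at load time with
 *
 *	modprobe cxgb3 msi=1
 *
 * or rewritten later through /sys/module/cxgb3/parameters/msi.  The value
 * is read when an adapter is probed, so runtime changes only affect
 * adapters initialized afterwards.
 */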
130
131 /*
132  * The driver enables offload by default.
133  * To disable it, set ofld_disable = 1.
134  */
135
136 static int ofld_disable = 0;
137
138 module_param(ofld_disable, int, 0644);
139 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
140
141 /*
142  * We have work elements that we need to cancel when an interface is taken
143  * down.  Normally the work elements would be executed by keventd but that
144  * can deadlock because of linkwatch.  If our close method takes the rtnl
145  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
146  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
147  * for our work to complete.  Get our own work queue to solve this.
148  */
149 struct workqueue_struct *cxgb3_wq;
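
/*
 * Deferred work for the driver runs on this queue; for instance,
 * schedule_chk_task() below queues the periodic check with
 *
 *	queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
 */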
150
151 /**
152  *      link_report - show link status and link speed/duplex
153  *      @dev: the port whose settings are to be reported
154  *
155  *      Shows the link status, speed, and duplex of a port.
156  */
157 static void link_report(struct net_device *dev)
158 {
159         if (!netif_carrier_ok(dev))
160                 netdev_info(dev, "link down\n");
161         else {
162                 const char *s = "10Mbps";
163                 const struct port_info *p = netdev_priv(dev);
164
165                 switch (p->link_config.speed) {
166                 case SPEED_10000:
167                         s = "10Gbps";
168                         break;
169                 case SPEED_1000:
170                         s = "1000Mbps";
171                         break;
172                 case SPEED_100:
173                         s = "100Mbps";
174                         break;
175                 }
176
177                 netdev_info(dev, "link up, %s, %s-duplex\n",
178                             s, p->link_config.duplex == DUPLEX_FULL
179                             ? "full" : "half");
180         }
181 }
182
183 static void enable_tx_fifo_drain(struct adapter *adapter,
184                                  struct port_info *pi)
185 {
186         t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
187                          F_ENDROPPKT);
188         t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
189         t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
190         t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
191 }
192
193 static void disable_tx_fifo_drain(struct adapter *adapter,
194                                   struct port_info *pi)
195 {
196         t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
197                          F_ENDROPPKT, 0);
198 }
199
200 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
201 {
202         struct net_device *dev = adap->port[port_id];
203         struct port_info *pi = netdev_priv(dev);
204
205         if (state == netif_carrier_ok(dev))
206                 return;
207
208         if (state) {
209                 struct cmac *mac = &pi->mac;
210
211                 netif_carrier_on(dev);
212
213                 disable_tx_fifo_drain(adap, pi);
214
215                 /* Clear local faults */
216                 t3_xgm_intr_disable(adap, pi->port_id);
217                 t3_read_reg(adap, A_XGM_INT_STATUS +
218                                     pi->mac.offset);
219                 t3_write_reg(adap,
220                              A_XGM_INT_CAUSE + pi->mac.offset,
221                              F_XGM_INT);
222
223                 t3_set_reg_field(adap,
224                                  A_XGM_INT_ENABLE +
225                                  pi->mac.offset,
226                                  F_XGM_INT, F_XGM_INT);
227                 t3_xgm_intr_enable(adap, pi->port_id);
228
229                 t3_mac_enable(mac, MAC_DIRECTION_TX);
230         } else {
231                 netif_carrier_off(dev);
232
233                 /* Flush TX FIFO */
234                 enable_tx_fifo_drain(adap, pi);
235         }
236         link_report(dev);
237 }
238
239 /**
240  *      t3_os_link_changed - handle link status changes
241  *      @adapter: the adapter associated with the link change
242  *      @port_id: the port index whose link status has changed
243  *      @link_stat: the new status of the link
244  *      @speed: the new speed setting
245  *      @duplex: the new duplex setting
246  *      @pause: the new flow-control setting
247  *
248  *      This is the OS-dependent handler for link status changes.  The
249  *      OS-neutral handler takes care of most of the processing for these events,
250  *      then calls this handler for any OS-specific processing.
251  */
252 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
253                         int speed, int duplex, int pause)
254 {
255         struct net_device *dev = adapter->port[port_id];
256         struct port_info *pi = netdev_priv(dev);
257         struct cmac *mac = &pi->mac;
258
259         /* Skip changes from disabled ports. */
260         if (!netif_running(dev))
261                 return;
262
263         if (link_stat != netif_carrier_ok(dev)) {
264                 if (link_stat) {
265                         disable_tx_fifo_drain(adapter, pi);
266
267                         t3_mac_enable(mac, MAC_DIRECTION_RX);
268
269                         /* Clear local faults */
270                         t3_xgm_intr_disable(adapter, pi->port_id);
271                         t3_read_reg(adapter, A_XGM_INT_STATUS +
272                                     pi->mac.offset);
273                         t3_write_reg(adapter,
274                                      A_XGM_INT_CAUSE + pi->mac.offset,
275                                      F_XGM_INT);
276
277                         t3_set_reg_field(adapter,
278                                          A_XGM_INT_ENABLE + pi->mac.offset,
279                                          F_XGM_INT, F_XGM_INT);
280                         t3_xgm_intr_enable(adapter, pi->port_id);
281
282                         netif_carrier_on(dev);
283                 } else {
284                         netif_carrier_off(dev);
285
286                         t3_xgm_intr_disable(adapter, pi->port_id);
287                         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
288                         t3_set_reg_field(adapter,
289                                          A_XGM_INT_ENABLE + pi->mac.offset,
290                                          F_XGM_INT, 0);
291
292                         if (is_10G(adapter))
293                                 pi->phy.ops->power_down(&pi->phy, 1);
294
295                         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
296                         t3_mac_disable(mac, MAC_DIRECTION_RX);
297                         t3_link_start(&pi->phy, mac, &pi->link_config);
298
299                         /* Flush TX FIFO */
300                         enable_tx_fifo_drain(adapter, pi);
301                 }
302
303                 link_report(dev);
304         }
305 }
306
307 /**
308  *      t3_os_phymod_changed - handle PHY module changes
309  *      @adap: the adapter associated with the PHY module change
310  *      @port_id: the port index whose PHY module changed
311  *
312  *      This is the OS-dependent handler for PHY module changes.  It is
313  *      invoked when a PHY module is removed or inserted for any OS-specific
314  *      processing.
315  */
316 void t3_os_phymod_changed(struct adapter *adap, int port_id)
317 {
318         static const char *mod_str[] = {
319                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
320         };
321
322         const struct net_device *dev = adap->port[port_id];
323         const struct port_info *pi = netdev_priv(dev);
324
325         if (pi->phy.modtype == phy_modtype_none)
326                 netdev_info(dev, "PHY module unplugged\n");
327         else
328                 netdev_info(dev, "%s PHY module inserted\n",
329                             mod_str[pi->phy.modtype]);
330 }
331
332 static void cxgb_set_rxmode(struct net_device *dev)
333 {
334         struct port_info *pi = netdev_priv(dev);
335
336         t3_mac_set_rx_mode(&pi->mac, dev);
337 }
338
339 /**
340  *      link_start - enable a port
341  *      @dev: the device to enable
342  *
343  *      Performs the MAC and PHY actions needed to enable a port.
344  */
345 static void link_start(struct net_device *dev)
346 {
347         struct port_info *pi = netdev_priv(dev);
348         struct cmac *mac = &pi->mac;
349
350         t3_mac_reset(mac);
351         t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
352         t3_mac_set_mtu(mac, dev->mtu);
353         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
354         t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
355         t3_mac_set_rx_mode(mac, dev);
356         t3_link_start(&pi->phy, mac, &pi->link_config);
357         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
358 }
359
360 static inline void cxgb_disable_msi(struct adapter *adapter)
361 {
362         if (adapter->flags & USING_MSIX) {
363                 pci_disable_msix(adapter->pdev);
364                 adapter->flags &= ~USING_MSIX;
365         } else if (adapter->flags & USING_MSI) {
366                 pci_disable_msi(adapter->pdev);
367                 adapter->flags &= ~USING_MSI;
368         }
369 }
370
371 /*
372  * Interrupt handler for asynchronous events used with MSI-X.
373  */
374 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
375 {
376         t3_slow_intr_handler(cookie);
377         return IRQ_HANDLED;
378 }
379
380 /*
381  * Name the MSI-X interrupts.
382  */
383 static void name_msix_vecs(struct adapter *adap)
384 {
385         int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
386
387         snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
388         adap->msix_info[0].desc[n] = 0;
389
390         for_each_port(adap, j) {
391                 struct net_device *d = adap->port[j];
392                 const struct port_info *pi = netdev_priv(d);
393
394                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
395                         snprintf(adap->msix_info[msi_idx].desc, n,
396                                  "%s-%d", d->name, pi->first_qset + i);
397                         adap->msix_info[msi_idx].desc[n] = 0;
398                 }
399         }
400 }
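
/*
 * Worked example (interface names assumed): with ports eth0 and eth1
 * carrying two queue sets each, vector 0 keeps the bare adapter name for
 * the slow/async-event interrupt, and the data vectors are named
 * "eth0-0", "eth0-1", "eth1-2" and "eth1-3", the suffix being the
 * absolute queue set index pi->first_qset + i.
 */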
401
402 static int request_msix_data_irqs(struct adapter *adap)
403 {
404         int i, j, err, qidx = 0;
405
406         for_each_port(adap, i) {
407                 int nqsets = adap2pinfo(adap, i)->nqsets;
408
409                 for (j = 0; j < nqsets; ++j) {
410                         err = request_irq(adap->msix_info[qidx + 1].vec,
411                                           t3_intr_handler(adap,
412                                                           adap->sge.qs[qidx].
413                                                           rspq.polling), 0,
414                                           adap->msix_info[qidx + 1].desc,
415                                           &adap->sge.qs[qidx]);
416                         if (err) {
417                                 while (--qidx >= 0)
418                                         free_irq(adap->msix_info[qidx + 1].vec,
419                                                  &adap->sge.qs[qidx]);
420                                 return err;
421                         }
422                         qidx++;
423                 }
424         }
425         return 0;
426 }
427
428 static void free_irq_resources(struct adapter *adapter)
429 {
430         if (adapter->flags & USING_MSIX) {
431                 int i, n = 0;
432
433                 free_irq(adapter->msix_info[0].vec, adapter);
434                 for_each_port(adapter, i)
435                         n += adap2pinfo(adapter, i)->nqsets;
436
437                 for (i = 0; i < n; ++i)
438                         free_irq(adapter->msix_info[i + 1].vec,
439                                  &adapter->sge.qs[i]);
440         } else
441                 free_irq(adapter->pdev->irq, adapter);
442 }
443
444 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
445                               unsigned long n)
446 {
447         int attempts = 10;
448
449         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
450                 if (!--attempts)
451                         return -ETIMEDOUT;
452                 msleep(10);
453         }
454         return 0;
455 }
456
457 static int init_tp_parity(struct adapter *adap)
458 {
459         int i;
460         struct sk_buff *skb;
461         struct cpl_set_tcb_field *greq;
462         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
463
464         t3_tp_set_offload_mode(adap, 1);
465
466         for (i = 0; i < 16; i++) {
467                 struct cpl_smt_write_req *req;
468
469                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
470                 if (!skb)
471                         skb = adap->nofail_skb;
472                 if (!skb)
473                         goto alloc_skb_fail;
474
475                 req = __skb_put_zero(skb, sizeof(*req));
476                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
477                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
478                 req->mtu_idx = NMTUS - 1;
479                 req->iff = i;
480                 t3_mgmt_tx(adap, skb);
481                 if (skb == adap->nofail_skb) {
482                         await_mgmt_replies(adap, cnt, i + 1);
483                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
484                         if (!adap->nofail_skb)
485                                 goto alloc_skb_fail;
486                 }
487         }
488
489         for (i = 0; i < 2048; i++) {
490                 struct cpl_l2t_write_req *req;
491
492                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
493                 if (!skb)
494                         skb = adap->nofail_skb;
495                 if (!skb)
496                         goto alloc_skb_fail;
497
498                 req = __skb_put_zero(skb, sizeof(*req));
499                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
500                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
501                 req->params = htonl(V_L2T_W_IDX(i));
502                 t3_mgmt_tx(adap, skb);
503                 if (skb == adap->nofail_skb) {
504                         await_mgmt_replies(adap, cnt, 16 + i + 1);
505                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
506                         if (!adap->nofail_skb)
507                                 goto alloc_skb_fail;
508                 }
509         }
510
511         for (i = 0; i < 2048; i++) {
512                 struct cpl_rte_write_req *req;
513
514                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
515                 if (!skb)
516                         skb = adap->nofail_skb;
517                 if (!skb)
518                         goto alloc_skb_fail;
519
520                 req = __skb_put_zero(skb, sizeof(*req));
521                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
522                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
523                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
524                 t3_mgmt_tx(adap, skb);
525                 if (skb == adap->nofail_skb) {
526                         await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
527                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
528                         if (!adap->nofail_skb)
529                                 goto alloc_skb_fail;
530                 }
531         }
532
533         skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
534         if (!skb)
535                 skb = adap->nofail_skb;
536         if (!skb)
537                 goto alloc_skb_fail;
538
539         greq = __skb_put_zero(skb, sizeof(*greq));
540         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
541         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
542         greq->mask = cpu_to_be64(1);
543         t3_mgmt_tx(adap, skb);
544
545         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
546         if (skb == adap->nofail_skb) {
547                 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
548                 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
549         }
550
551         t3_tp_set_offload_mode(adap, 0);
552         return i;
553
554 alloc_skb_fail:
555         t3_tp_set_offload_mode(adap, 0);
556         return -ENOMEM;
557 }
558
559 /**
560  *      setup_rss - configure RSS
561  *      @adap: the adapter
562  *
563  *      Sets up RSS to distribute packets to multiple receive queues.  We
564  *      configure the RSS CPU lookup table to distribute to the number of HW
565  *      receive queues, and the response queue lookup table to narrow that
566  *      down to the response queues actually configured for each port.
567  *      We always configure the RSS mapping for two ports since the mapping
568  *      table has plenty of entries.
569  */
570 static void setup_rss(struct adapter *adap)
571 {
572         int i;
573         unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
574         unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
575         u8 cpus[SGE_QSETS + 1];
576         u16 rspq_map[RSS_TABLE_SIZE + 1];
577
578         for (i = 0; i < SGE_QSETS; ++i)
579                 cpus[i] = i;
580         cpus[SGE_QSETS] = 0xff; /* terminator */
581
582         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
583                 rspq_map[i] = i % nq0;
584                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
585         }
586         rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
587
588         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
589                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
590                       V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
591 }
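
/*
 * Worked example (queue counts assumed): with nq0 = 2 and nq1 = 2 the
 * lower half of rspq_map reads 0, 1, 0, 1, ... and the upper half
 * 2, 3, 2, 3, ..., so hash values landing in the lower half of the table
 * select port 0's queue sets and those in the upper half select port 1's.
 */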
592
593 static void ring_dbs(struct adapter *adap)
594 {
595         int i, j;
596
597         for (i = 0; i < SGE_QSETS; i++) {
598                 struct sge_qset *qs = &adap->sge.qs[i];
599
600                 if (qs->adap)
601                         for (j = 0; j < SGE_TXQ_PER_SET; j++)
602                                 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
603         }
604 }
605
606 static void init_napi(struct adapter *adap)
607 {
608         int i;
609
610         for (i = 0; i < SGE_QSETS; i++) {
611                 struct sge_qset *qs = &adap->sge.qs[i];
612
613                 if (qs->adap)
614                         netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
615                                        64);
616         }
617
618         /*
619          * netif_napi_add() can be called only once per napi_struct because it
620          * adds each new napi_struct to a list.  Be careful not to call it a
621          * second time, e.g., during EEH recovery, by making a note of it.
622          */
623         adap->flags |= NAPI_INIT;
624 }
625
626 /*
627  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
628  * both netdevices representing interfaces and the dummy ones for the extra
629  * queues.
630  */
631 static void quiesce_rx(struct adapter *adap)
632 {
633         int i;
634
635         for (i = 0; i < SGE_QSETS; i++)
636                 if (adap->sge.qs[i].adap)
637                         napi_disable(&adap->sge.qs[i].napi);
638 }
639
640 static void enable_all_napi(struct adapter *adap)
641 {
642         int i;
643         for (i = 0; i < SGE_QSETS; i++)
644                 if (adap->sge.qs[i].adap)
645                         napi_enable(&adap->sge.qs[i].napi);
646 }
647
648 /**
649  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
650  *      @adap: the adapter
651  *
652  *      Determines how many sets of SGE queues to use and initializes them.
653  *      We support multiple queue sets per port if we have MSI-X, otherwise
654  *      just one queue set per port.
655  */
656 static int setup_sge_qsets(struct adapter *adap)
657 {
658         int i, j, err, irq_idx = 0, qset_idx = 0;
659         unsigned int ntxq = SGE_TXQ_PER_SET;
660
661         if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
662                 irq_idx = -1;
663
664         for_each_port(adap, i) {
665                 struct net_device *dev = adap->port[i];
666                 struct port_info *pi = netdev_priv(dev);
667
668                 pi->qs = &adap->sge.qs[pi->first_qset];
669                 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
670                         err = t3_sge_alloc_qset(adap, qset_idx, 1,
671                                 (adap->flags & USING_MSIX) ? qset_idx + 1 :
672                                                              irq_idx,
673                                 &adap->params.sge.qset[qset_idx], ntxq, dev,
674                                 netdev_get_tx_queue(dev, j));
675                         if (err) {
676                                 t3_free_sge_resources(adap);
677                                 return err;
678                         }
679                 }
680         }
681
682         return 0;
683 }
684
685 static ssize_t attr_show(struct device *d, char *buf,
686                          ssize_t (*format)(struct net_device *, char *))
687 {
688         ssize_t len;
689
690         /* Synchronize with ioctls that may shut down the device */
691         rtnl_lock();
692         len = (*format)(to_net_dev(d), buf);
693         rtnl_unlock();
694         return len;
695 }
696
697 static ssize_t attr_store(struct device *d,
698                           const char *buf, size_t len,
699                           ssize_t (*set)(struct net_device *, unsigned int),
700                           unsigned int min_val, unsigned int max_val)
701 {
702         ssize_t ret;
703         unsigned int val;
704
705         if (!capable(CAP_NET_ADMIN))
706                 return -EPERM;
707
708         ret = kstrtouint(buf, 0, &val);
709         if (ret)
710                 return ret;
711         if (val < min_val || val > max_val)
712                 return -EINVAL;
713
714         rtnl_lock();
715         ret = (*set)(to_net_dev(d), val);
716         if (!ret)
717                 ret = len;
718         rtnl_unlock();
719         return ret;
720 }
721
722 #define CXGB3_SHOW(name, val_expr) \
723 static ssize_t format_##name(struct net_device *dev, char *buf) \
724 { \
725         struct port_info *pi = netdev_priv(dev); \
726         struct adapter *adap = pi->adapter; \
727         return sprintf(buf, "%u\n", val_expr); \
728 } \
729 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
730                            char *buf) \
731 { \
732         return attr_show(d, buf, format_##name); \
733 }
734
735 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
736 {
737         struct port_info *pi = netdev_priv(dev);
738         struct adapter *adap = pi->adapter;
739         int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
740
741         if (adap->flags & FULL_INIT_DONE)
742                 return -EBUSY;
743         if (val && adap->params.rev == 0)
744                 return -EINVAL;
745         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
746             min_tids)
747                 return -EINVAL;
748         adap->params.mc5.nfilters = val;
749         return 0;
750 }
751
752 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
753                               const char *buf, size_t len)
754 {
755         return attr_store(d, buf, len, set_nfilters, 0, ~0);
756 }
757
758 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
759 {
760         struct port_info *pi = netdev_priv(dev);
761         struct adapter *adap = pi->adapter;
762
763         if (adap->flags & FULL_INIT_DONE)
764                 return -EBUSY;
765         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
766             MC5_MIN_TIDS)
767                 return -EINVAL;
768         adap->params.mc5.nservers = val;
769         return 0;
770 }
771
772 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
773                               const char *buf, size_t len)
774 {
775         return attr_store(d, buf, len, set_nservers, 0, ~0);
776 }
777
778 #define CXGB3_ATTR_R(name, val_expr) \
779 CXGB3_SHOW(name, val_expr) \
780 static DEVICE_ATTR(name, 0444, show_##name, NULL)
781
782 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
783 CXGB3_SHOW(name, val_expr) \
784 static DEVICE_ATTR(name, 0644, show_##name, store_method)
785
786 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
787 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
788 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
789
790 static struct attribute *cxgb3_attrs[] = {
791         &dev_attr_cam_size.attr,
792         &dev_attr_nfilters.attr,
793         &dev_attr_nservers.attr,
794         NULL
795 };
796
797 static const struct attribute_group cxgb3_attr_group = {
798         .attrs = cxgb3_attrs,
799 };
800
801 static ssize_t tm_attr_show(struct device *d,
802                             char *buf, int sched)
803 {
804         struct port_info *pi = netdev_priv(to_net_dev(d));
805         struct adapter *adap = pi->adapter;
806         unsigned int v, addr, bpt, cpt;
807         ssize_t len;
808
809         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
810         rtnl_lock();
811         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
812         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
813         if (sched & 1)
814                 v >>= 16;
815         bpt = (v >> 8) & 0xff;
816         cpt = v & 0xff;
817         if (!cpt)
818                 len = sprintf(buf, "disabled\n");
819         else {
820                 v = (adap->params.vpd.cclk * 1000) / cpt;
821                 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
822         }
823         rtnl_unlock();
824         return len;
825 }
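
/*
 * Unit check for the rate computed above: bpt is bytes per token and cpt
 * is core-clock ticks per token, so with cclk in kHz (an assumption
 * consistent with this arithmetic), v = cclk * 1000 / cpt is tokens per
 * second, v * bpt is bytes per second, and dividing by 125 converts to
 * Kbps, since 1 Kbps = 125 bytes/s.
 */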
826
827 static ssize_t tm_attr_store(struct device *d,
828                              const char *buf, size_t len, int sched)
829 {
830         struct port_info *pi = netdev_priv(to_net_dev(d));
831         struct adapter *adap = pi->adapter;
832         unsigned int val;
833         ssize_t ret;
834
835         if (!capable(CAP_NET_ADMIN))
836                 return -EPERM;
837
838         ret = kstrtouint(buf, 0, &val);
839         if (ret)
840                 return ret;
841         if (val > 10000000)
842                 return -EINVAL;
843
844         rtnl_lock();
845         ret = t3_config_sched(adap, val, sched);
846         if (!ret)
847                 ret = len;
848         rtnl_unlock();
849         return ret;
850 }
851
852 #define TM_ATTR(name, sched) \
853 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
854                            char *buf) \
855 { \
856         return tm_attr_show(d, buf, sched); \
857 } \
858 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
859                             const char *buf, size_t len) \
860 { \
861         return tm_attr_store(d, buf, len, sched); \
862 } \
863 static DEVICE_ATTR(name, 0644, show_##name, store_##name)
864
865 TM_ATTR(sched0, 0);
866 TM_ATTR(sched1, 1);
867 TM_ATTR(sched2, 2);
868 TM_ATTR(sched3, 3);
869 TM_ATTR(sched4, 4);
870 TM_ATTR(sched5, 5);
871 TM_ATTR(sched6, 6);
872 TM_ATTR(sched7, 7);
873
874 static struct attribute *offload_attrs[] = {
875         &dev_attr_sched0.attr,
876         &dev_attr_sched1.attr,
877         &dev_attr_sched2.attr,
878         &dev_attr_sched3.attr,
879         &dev_attr_sched4.attr,
880         &dev_attr_sched5.attr,
881         &dev_attr_sched6.attr,
882         &dev_attr_sched7.attr,
883         NULL
884 };
885
886 static const struct attribute_group offload_attr_group = {
887         .attrs = offload_attrs,
888 };
889
890 /*
891  * Sends an sk_buff to an offload queue driver
892  * after dealing with any active network taps.
893  */
894 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
895 {
896         int ret;
897
898         local_bh_disable();
899         ret = t3_offload_tx(tdev, skb);
900         local_bh_enable();
901         return ret;
902 }
903
904 static int write_smt_entry(struct adapter *adapter, int idx)
905 {
906         struct cpl_smt_write_req *req;
907         struct port_info *pi = netdev_priv(adapter->port[idx]);
908         struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
909
910         if (!skb)
911                 return -ENOMEM;
912
913         req = __skb_put(skb, sizeof(*req));
914         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
915         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
916         req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
917         req->iff = idx;
918         memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
919         memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
920         skb->priority = 1;
921         offload_tx(&adapter->tdev, skb);
922         return 0;
923 }
924
925 static int init_smt(struct adapter *adapter)
926 {
927         int i;
928
929         for_each_port(adapter, i)
930                 write_smt_entry(adapter, i);
931         return 0;
932 }
933
934 static void init_port_mtus(struct adapter *adapter)
935 {
936         unsigned int mtus = adapter->port[0]->mtu;
937
938         if (adapter->port[1])
939                 mtus |= adapter->port[1]->mtu << 16;
940         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
941 }
942
943 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
944                               int hi, int port)
945 {
946         struct sk_buff *skb;
947         struct mngt_pktsched_wr *req;
948         int ret;
949
950         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
951         if (!skb)
952                 skb = adap->nofail_skb;
953         if (!skb)
954                 return -ENOMEM;
955
956         req = skb_put(skb, sizeof(*req));
957         req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
958         req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
959         req->sched = sched;
960         req->idx = qidx;
961         req->min = lo;
962         req->max = hi;
963         req->binding = port;
964         ret = t3_mgmt_tx(adap, skb);
965         if (skb == adap->nofail_skb) {
966                 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
967                                              GFP_KERNEL);
968                 if (!adap->nofail_skb)
969                         ret = -ENOMEM;
970         }
971
972         return ret;
973 }
974
975 static int bind_qsets(struct adapter *adap)
976 {
977         int i, j, err = 0;
978
979         for_each_port(adap, i) {
980                 const struct port_info *pi = adap2pinfo(adap, i);
981
982                 for (j = 0; j < pi->nqsets; ++j) {
983                         int ret = send_pktsched_cmd(adap, 1,
984                                                     pi->first_qset + j, -1,
985                                                     -1, i);
986                         if (ret)
987                                 err = ret;
988                 }
989         }
990
991         return err;
992 }
993
994 /*(DEBLOBBED)*/
995 #define FW_FNAME "/*(DEBLOBBED)*/"
996 /*(DEBLOBBED)*/
997 #define TPSRAM_NAME "/*(DEBLOBBED)*/"
998 #define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
999 #define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
1000 #define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
1001 /*(DEBLOBBED)*/
1002
1003 static inline const char *get_edc_fw_name(int edc_idx)
1004 {
1005         const char *fw_name = NULL;
1006
1007         switch (edc_idx) {
1008         case EDC_OPT_AEL2005:
1009                 fw_name = AEL2005_OPT_EDC_NAME;
1010                 break;
1011         case EDC_TWX_AEL2005:
1012                 fw_name = AEL2005_TWX_EDC_NAME;
1013                 break;
1014         case EDC_TWX_AEL2020:
1015                 fw_name = AEL2020_TWX_EDC_NAME;
1016                 break;
1017         }
1018         return fw_name;
1019 }
1020
1021 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1022 {
1023         struct adapter *adapter = phy->adapter;
1024         const struct firmware *fw;
1025         const char *fw_name;
1026         u32 csum;
1027         const __be32 *p;
1028         u16 *cache = phy->phy_cache;
1029         int i, ret = -EINVAL;
1030
1031         fw_name = get_edc_fw_name(edc_idx);
1032         if (fw_name)
1033                 ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
1034         if (ret < 0) {
1035                 dev_err(&adapter->pdev->dev,
1036                         "could not upgrade firmware: unable to load %s\n",
1037                         fw_name);
1038                 return ret;
1039         }
1040
1041         /* check size, taking the trailing checksum word into account */
1042         if (fw->size > size + 4) {
1043                 CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1044                        (unsigned int)fw->size, size + 4);
1045                 ret = -EINVAL;
1046         }
1047
1048         /* compute checksum */
1049         p = (const __be32 *)fw->data;
1050         for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1051                 csum += ntohl(p[i]);
1052
1053         if (csum != 0xffffffff) {
1054                 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1055                        csum);
1056                 ret = -EINVAL;
1057         }
1058
1059         for (i = 0; i < size / 4 ; i++) {
1060                 *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1061                 *cache++ = be32_to_cpu(p[i]) & 0xffff;
1062         }
1063
1064         release_firmware(fw);
1065
1066         return ret;
1067 }
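
/*
 * The loop above implements a sums-to-all-ones checksum: the image ends
 * with a 32-bit word chosen so that the sum of every big-endian word in
 * the file equals 0xffffffff.  Equivalently, the trailing word is the
 * bitwise complement of the sum of the words before it, because
 * 0xffffffff - x == ~x in 32-bit arithmetic.
 */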
1068
1069 static int upgrade_fw(struct adapter *adap)
1070 {
1071         int ret;
1072         const struct firmware *fw;
1073         struct device *dev = &adap->pdev->dev;
1074
1075         ret = reject_firmware(&fw, FW_FNAME, dev);
1076         if (ret < 0) {
1077                 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1078                         FW_FNAME);
1079                 return ret;
1080         }
1081         ret = t3_load_fw(adap, fw->data, fw->size);
1082         release_firmware(fw);
1083
1084         if (ret == 0)
1085                 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1086                          FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1087         else
1088                 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1089                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1090
1091         return ret;
1092 }
1093
1094 static inline char t3rev2char(struct adapter *adapter)
1095 {
1096         char rev = 0;
1097
1098         switch(adapter->params.rev) {
1099         case T3_REV_B:
1100         case T3_REV_B2:
1101                 rev = 'b';
1102                 break;
1103         case T3_REV_C:
1104                 rev = 'c';
1105                 break;
1106         }
1107         return rev;
1108 }
1109
1110 static int update_tpsram(struct adapter *adap)
1111 {
1112         const struct firmware *tpsram;
1113         char buf[64];
1114         struct device *dev = &adap->pdev->dev;
1115         int ret;
1116         char rev;
1117
1118         rev = t3rev2char(adap);
1119         if (!rev)
1120                 return 0;
1121
1122         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1123
1124         ret = reject_firmware(&tpsram, buf, dev);
1125         if (ret < 0) {
1126                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1127                         buf);
1128                 return ret;
1129         }
1130
1131         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1132         if (ret)
1133                 goto release_tpsram;
1134
1135         ret = t3_set_proto_sram(adap, tpsram->data);
1136         if (ret == 0)
1137                 dev_info(dev,
1138                          "successful update of protocol engine "
1139                          "to %d.%d.%d\n",
1140                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1141         else
1142                 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
1143                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1144         if (ret)
1145                 dev_err(dev, "loading protocol SRAM failed\n");
1146
1147 release_tpsram:
1148         release_firmware(tpsram);
1149
1150         return ret;
1151 }
1152
1153 /**
1154  * t3_synchronize_rx - wait for current Rx processing on a port to complete
1155  * @adap: the adapter
1156  * @p: the port
1157  *
1158  * Ensures that current Rx processing on any of the queues associated with
1159  * the given port completes before returning.  We do this by acquiring and
1160  * releasing the locks of the response queues associated with the port.
1161  */
1162 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1163 {
1164         int i;
1165
1166         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1167                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1168
1169                 spin_lock_irq(&q->lock);
1170                 spin_unlock_irq(&q->lock);
1171         }
1172 }
1173
1174 static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1175 {
1176         struct port_info *pi = netdev_priv(dev);
1177         struct adapter *adapter = pi->adapter;
1178
1179         if (adapter->params.rev > 0) {
1180                 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1181                                   features & NETIF_F_HW_VLAN_CTAG_RX);
1182         } else {
1183                 /* single control for all ports */
1184                 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1185
1186                 for_each_port(adapter, i)
1187                         have_vlans |=
1188                                 adapter->port[i]->features &
1189                                 NETIF_F_HW_VLAN_CTAG_RX;
1190
1191                 t3_set_vlan_accel(adapter, 1, have_vlans);
1192         }
1193         t3_synchronize_rx(adapter, pi);
1194 }
1195
1196 /**
1197  *      cxgb_up - enable the adapter
1198  *      @adap: adapter being enabled
1199  *
1200  *      Called when the first port is enabled, this function performs the
1201  *      actions necessary to make an adapter operational, such as completing
1202  *      the initialization of HW modules and enabling interrupts.
1203  *
1204  *      Must be called with the rtnl lock held.
1205  */
1206 static int cxgb_up(struct adapter *adap)
1207 {
1208         int i, err;
1209
1210         if (!(adap->flags & FULL_INIT_DONE)) {
1211                 err = t3_check_fw_version(adap);
1212                 if (err == -EINVAL) {
1213                         err = upgrade_fw(adap);
1214                         CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1215                                 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1216                                 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1217                 }
1218
1219                 err = t3_check_tpsram_version(adap);
1220                 if (err == -EINVAL) {
1221                         err = update_tpsram(adap);
1222                         CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1223                                 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1224                                 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1225                 }
1226
1227                 /*
1228                  * Clear interrupts now to catch errors if t3_init_hw fails.
1229                  * We clear them again later as initialization may trigger
1230                  * conditions that can interrupt.
1231                  */
1232                 t3_intr_clear(adap);
1233
1234                 err = t3_init_hw(adap, 0);
1235                 if (err)
1236                         goto out;
1237
1238                 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1239                 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1240
1241                 err = setup_sge_qsets(adap);
1242                 if (err)
1243                         goto out;
1244
1245                 for_each_port(adap, i)
1246                         cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1247
1248                 setup_rss(adap);
1249                 if (!(adap->flags & NAPI_INIT))
1250                         init_napi(adap);
1251
1252                 t3_start_sge_timers(adap);
1253                 adap->flags |= FULL_INIT_DONE;
1254         }
1255
1256         t3_intr_clear(adap);
1257
1258         if (adap->flags & USING_MSIX) {
1259                 name_msix_vecs(adap);
1260                 err = request_irq(adap->msix_info[0].vec,
1261                                   t3_async_intr_handler, 0,
1262                                   adap->msix_info[0].desc, adap);
1263                 if (err)
1264                         goto irq_err;
1265
1266                 err = request_msix_data_irqs(adap);
1267                 if (err) {
1268                         free_irq(adap->msix_info[0].vec, adap);
1269                         goto irq_err;
1270                 }
1271         } else if ((err = request_irq(adap->pdev->irq,
1272                                       t3_intr_handler(adap,
1273                                                       adap->sge.qs[0].rspq.
1274                                                       polling),
1275                                       (adap->flags & USING_MSI) ?
1276                                        0 : IRQF_SHARED,
1277                                       adap->name, adap)))
1278                 goto irq_err;
1279
1280         enable_all_napi(adap);
1281         t3_sge_start(adap);
1282         t3_intr_enable(adap);
1283
1284         if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1285             is_offload(adap) && init_tp_parity(adap) == 0)
1286                 adap->flags |= TP_PARITY_INIT;
1287
1288         if (adap->flags & TP_PARITY_INIT) {
1289                 t3_write_reg(adap, A_TP_INT_CAUSE,
1290                              F_CMCACHEPERR | F_ARPLUTPERR);
1291                 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1292         }
1293
1294         if (!(adap->flags & QUEUES_BOUND)) {
1295                 int ret = bind_qsets(adap);
1296
1297                 if (ret < 0) {
1298                         CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1299                         t3_intr_disable(adap);
1300                         quiesce_rx(adap);
1301                         free_irq_resources(adap);
1302                         err = ret;
1303                         goto out;
1304                 }
1305                 adap->flags |= QUEUES_BOUND;
1306         }
1307
1308 out:
1309         return err;
1310 irq_err:
1311         CH_ERR(adap, "request_irq failed, err %d\n", err);
1312         goto out;
1313 }
1314
1315 /*
1316  * Release resources when all the ports and offloading have been stopped.
1317  */
1318 static void cxgb_down(struct adapter *adapter, int on_wq)
1319 {
1320         t3_sge_stop(adapter);
1321         spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
1322         t3_intr_disable(adapter);
1323         spin_unlock_irq(&adapter->work_lock);
1324
1325         free_irq_resources(adapter);
1326         quiesce_rx(adapter);
1327         t3_sge_stop(adapter);
1328         if (!on_wq)
1329                 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1330 }
1331
1332 static void schedule_chk_task(struct adapter *adap)
1333 {
1334         unsigned int timeo;
1335
1336         timeo = adap->params.linkpoll_period ?
1337             (HZ * adap->params.linkpoll_period) / 10 :
1338             adap->params.stats_update_period * HZ;
1339         if (timeo)
1340                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1341 }
1342
1343 static int offload_open(struct net_device *dev)
1344 {
1345         struct port_info *pi = netdev_priv(dev);
1346         struct adapter *adapter = pi->adapter;
1347         struct t3cdev *tdev = dev2t3cdev(dev);
1348         int adap_up = adapter->open_device_map & PORT_MASK;
1349         int err;
1350
1351         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1352                 return 0;
1353
1354         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1355                 goto out;
1356
1357         t3_tp_set_offload_mode(adapter, 1);
1358         tdev->lldev = adapter->port[0];
1359         err = cxgb3_offload_activate(adapter);
1360         if (err)
1361                 goto out;
1362
1363         init_port_mtus(adapter);
1364         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1365                      adapter->params.b_wnd,
1366                      adapter->params.rev == 0 ?
1367                      adapter->port[0]->mtu : 0xffff);
1368         init_smt(adapter);
1369
1370         if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1371                 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1372
1373         /* Call back all registered clients */
1374         cxgb3_add_clients(tdev);
1375
1376 out:
1377         /* restore them in case the offload module has changed them */
1378         if (err) {
1379                 t3_tp_set_offload_mode(adapter, 0);
1380                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1381                 cxgb3_set_dummy_ops(tdev);
1382         }
1383         return err;
1384 }
1385
1386 static int offload_close(struct t3cdev *tdev)
1387 {
1388         struct adapter *adapter = tdev2adap(tdev);
1389         struct t3c_data *td = T3C_DATA(tdev);
1390
1391         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1392                 return 0;
1393
1394         /* Call back all registered clients */
1395         cxgb3_remove_clients(tdev);
1396
1397         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1398
1399         /* Flush work scheduled while releasing TIDs */
1400         flush_work(&td->tid_release_task);
1401
1402         tdev->lldev = NULL;
1403         cxgb3_set_dummy_ops(tdev);
1404         t3_tp_set_offload_mode(adapter, 0);
1405         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1406
1407         if (!adapter->open_device_map)
1408                 cxgb_down(adapter, 0);
1409
1410         cxgb3_offload_deactivate(adapter);
1411         return 0;
1412 }
1413
1414 static int cxgb_open(struct net_device *dev)
1415 {
1416         struct port_info *pi = netdev_priv(dev);
1417         struct adapter *adapter = pi->adapter;
1418         int other_ports = adapter->open_device_map & PORT_MASK;
1419         int err;
1420
1421         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1422                 return err;
1423
1424         set_bit(pi->port_id, &adapter->open_device_map);
1425         if (is_offload(adapter) && !ofld_disable) {
1426                 err = offload_open(dev);
1427                 if (err)
1428                         pr_warn("Could not initialize offload capabilities\n");
1429         }
1430
1431         netif_set_real_num_tx_queues(dev, pi->nqsets);
1432         err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1433         if (err)
1434                 return err;
1435         link_start(dev);
1436         t3_port_intr_enable(adapter, pi->port_id);
1437         netif_tx_start_all_queues(dev);
1438         if (!other_ports)
1439                 schedule_chk_task(adapter);
1440
1441         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1442         return 0;
1443 }
1444
1445 static int __cxgb_close(struct net_device *dev, int on_wq)
1446 {
1447         struct port_info *pi = netdev_priv(dev);
1448         struct adapter *adapter = pi->adapter;
1449
1450
1451         if (!adapter->open_device_map)
1452                 return 0;
1453
1454         /* Stop link fault interrupts */
1455         t3_xgm_intr_disable(adapter, pi->port_id);
1456         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1457
1458         t3_port_intr_disable(adapter, pi->port_id);
1459         netif_tx_stop_all_queues(dev);
1460         pi->phy.ops->power_down(&pi->phy, 1);
1461         netif_carrier_off(dev);
1462         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1463
1464         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1465         clear_bit(pi->port_id, &adapter->open_device_map);
1466         spin_unlock_irq(&adapter->work_lock);
1467
1468         if (!(adapter->open_device_map & PORT_MASK))
1469                 cancel_delayed_work_sync(&adapter->adap_check_task);
1470
1471         if (!adapter->open_device_map)
1472                 cxgb_down(adapter, on_wq);
1473
1474         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1475         return 0;
1476 }
1477
1478 static int cxgb_close(struct net_device *dev)
1479 {
1480         return __cxgb_close(dev, 0);
1481 }
1482
1483 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1484 {
1485         struct port_info *pi = netdev_priv(dev);
1486         struct adapter *adapter = pi->adapter;
1487         struct net_device_stats *ns = &dev->stats;
1488         const struct mac_stats *pstats;
1489
1490         spin_lock(&adapter->stats_lock);
1491         pstats = t3_mac_update_stats(&pi->mac);
1492         spin_unlock(&adapter->stats_lock);
1493
1494         ns->tx_bytes = pstats->tx_octets;
1495         ns->tx_packets = pstats->tx_frames;
1496         ns->rx_bytes = pstats->rx_octets;
1497         ns->rx_packets = pstats->rx_frames;
1498         ns->multicast = pstats->rx_mcast_frames;
1499
1500         ns->tx_errors = pstats->tx_underrun;
1501         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1502             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1503             pstats->rx_fifo_ovfl;
1504
1505         /* detailed rx_errors */
1506         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1507         ns->rx_over_errors = 0;
1508         ns->rx_crc_errors = pstats->rx_fcs_errs;
1509         ns->rx_frame_errors = pstats->rx_symbol_errs;
1510         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1511         ns->rx_missed_errors = pstats->rx_cong_drops;
1512
1513         /* detailed tx_errors */
1514         ns->tx_aborted_errors = 0;
1515         ns->tx_carrier_errors = 0;
1516         ns->tx_fifo_errors = pstats->tx_underrun;
1517         ns->tx_heartbeat_errors = 0;
1518         ns->tx_window_errors = 0;
1519         return ns;
1520 }
1521
1522 static u32 get_msglevel(struct net_device *dev)
1523 {
1524         struct port_info *pi = netdev_priv(dev);
1525         struct adapter *adapter = pi->adapter;
1526
1527         return adapter->msg_enable;
1528 }
1529
1530 static void set_msglevel(struct net_device *dev, u32 val)
1531 {
1532         struct port_info *pi = netdev_priv(dev);
1533         struct adapter *adapter = pi->adapter;
1534
1535         adapter->msg_enable = val;
1536 }
1537
1538 static const char stats_strings[][ETH_GSTRING_LEN] = {
1539         "TxOctetsOK         ",
1540         "TxFramesOK         ",
1541         "TxMulticastFramesOK",
1542         "TxBroadcastFramesOK",
1543         "TxPauseFrames      ",
1544         "TxUnderrun         ",
1545         "TxExtUnderrun      ",
1546
1547         "TxFrames64         ",
1548         "TxFrames65To127    ",
1549         "TxFrames128To255   ",
1550         "TxFrames256To511   ",
1551         "TxFrames512To1023  ",
1552         "TxFrames1024To1518 ",
1553         "TxFrames1519ToMax  ",
1554
1555         "RxOctetsOK         ",
1556         "RxFramesOK         ",
1557         "RxMulticastFramesOK",
1558         "RxBroadcastFramesOK",
1559         "RxPauseFrames      ",
1560         "RxFCSErrors        ",
1561         "RxSymbolErrors     ",
1562         "RxShortErrors      ",
1563         "RxJabberErrors     ",
1564         "RxLengthErrors     ",
1565         "RxFIFOoverflow     ",
1566
1567         "RxFrames64         ",
1568         "RxFrames65To127    ",
1569         "RxFrames128To255   ",
1570         "RxFrames256To511   ",
1571         "RxFrames512To1023  ",
1572         "RxFrames1024To1518 ",
1573         "RxFrames1519ToMax  ",
1574
1575         "PhyFIFOErrors      ",
1576         "TSO                ",
1577         "VLANextractions    ",
1578         "VLANinsertions     ",
1579         "TxCsumOffload      ",
1580         "RxCsumGood         ",
1581         "LroAggregated      ",
1582         "LroFlushed         ",
1583         "LroNoDesc          ",
1584         "RxDrops            ",
1585
1586         "CheckTXEnToggled   ",
1587         "CheckResets        ",
1588
1589         "LinkFaults         ",
1590 };
1591
1592 static int get_sset_count(struct net_device *dev, int sset)
1593 {
1594         switch (sset) {
1595         case ETH_SS_STATS:
1596                 return ARRAY_SIZE(stats_strings);
1597         default:
1598                 return -EOPNOTSUPP;
1599         }
1600 }
1601
1602 #define T3_REGMAP_SIZE (3 * 1024)
1603
1604 static int get_regs_len(struct net_device *dev)
1605 {
1606         return T3_REGMAP_SIZE;
1607 }
1608
1609 static int get_eeprom_len(struct net_device *dev)
1610 {
1611         return EEPROMSIZE;
1612 }
1613
1614 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1615 {
1616         struct port_info *pi = netdev_priv(dev);
1617         struct adapter *adapter = pi->adapter;
1618         u32 fw_vers = 0;
1619         u32 tp_vers = 0;
1620
1621         spin_lock(&adapter->stats_lock);
1622         t3_get_fw_version(adapter, &fw_vers);
1623         t3_get_tp_version(adapter, &tp_vers);
1624         spin_unlock(&adapter->stats_lock);
1625
1626         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1627         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1628         strlcpy(info->bus_info, pci_name(adapter->pdev),
1629                 sizeof(info->bus_info));
1630         if (fw_vers)
1631                 snprintf(info->fw_version, sizeof(info->fw_version),
1632                          "%s %u.%u.%u TP %u.%u.%u",
1633                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1634                          G_FW_VERSION_MAJOR(fw_vers),
1635                          G_FW_VERSION_MINOR(fw_vers),
1636                          G_FW_VERSION_MICRO(fw_vers),
1637                          G_TP_VERSION_MAJOR(tp_vers),
1638                          G_TP_VERSION_MINOR(tp_vers),
1639                          G_TP_VERSION_MICRO(tp_vers));
1640 }
1641
1642 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1643 {
1644         if (stringset == ETH_SS_STATS)
1645                 memcpy(data, stats_strings, sizeof(stats_strings));
1646 }
1647
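     /*
      * Sum one per-queue-set SGE counter, port_stats[idx], across all
      * queue sets owned by a port (first_qset .. first_qset + nqsets - 1).
      */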
1648 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1649                                             struct port_info *p, int idx)
1650 {
1651         int i;
1652         unsigned long tot = 0;
1653
1654         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1655                 tot += adapter->sge.qs[i].port_stats[idx];
1656         return tot;
1657 }
1658
1659 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1660                       u64 *data)
1661 {
1662         struct port_info *pi = netdev_priv(dev);
1663         struct adapter *adapter = pi->adapter;
1664         const struct mac_stats *s;
1665
1666         spin_lock(&adapter->stats_lock);
1667         s = t3_mac_update_stats(&pi->mac);
1668         spin_unlock(&adapter->stats_lock);
1669
1670         *data++ = s->tx_octets;
1671         *data++ = s->tx_frames;
1672         *data++ = s->tx_mcast_frames;
1673         *data++ = s->tx_bcast_frames;
1674         *data++ = s->tx_pause;
1675         *data++ = s->tx_underrun;
1676         *data++ = s->tx_fifo_urun;
1677
1678         *data++ = s->tx_frames_64;
1679         *data++ = s->tx_frames_65_127;
1680         *data++ = s->tx_frames_128_255;
1681         *data++ = s->tx_frames_256_511;
1682         *data++ = s->tx_frames_512_1023;
1683         *data++ = s->tx_frames_1024_1518;
1684         *data++ = s->tx_frames_1519_max;
1685
1686         *data++ = s->rx_octets;
1687         *data++ = s->rx_frames;
1688         *data++ = s->rx_mcast_frames;
1689         *data++ = s->rx_bcast_frames;
1690         *data++ = s->rx_pause;
1691         *data++ = s->rx_fcs_errs;
1692         *data++ = s->rx_symbol_errs;
1693         *data++ = s->rx_short;
1694         *data++ = s->rx_jabber;
1695         *data++ = s->rx_too_long;
1696         *data++ = s->rx_fifo_ovfl;
1697
1698         *data++ = s->rx_frames_64;
1699         *data++ = s->rx_frames_65_127;
1700         *data++ = s->rx_frames_128_255;
1701         *data++ = s->rx_frames_256_511;
1702         *data++ = s->rx_frames_512_1023;
1703         *data++ = s->rx_frames_1024_1518;
1704         *data++ = s->rx_frames_1519_max;
1705
1706         *data++ = pi->phy.fifo_errors;
1707
1708         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1709         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1710         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1711         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1712         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
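             /* zero placeholders for the Lro* slots in stats_strings */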
1713         *data++ = 0;
1714         *data++ = 0;
1715         *data++ = 0;
1716         *data++ = s->rx_cong_drops;
1717
1718         *data++ = s->num_toggled;
1719         *data++ = s->num_resets;
1720
1721         *data++ = s->link_faults;
1722 }
1723
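     /*
      * Copy registers [start, end] into the dump buffer at their native
      * offsets: each 32-bit register lands at buf + its address.
      */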
1724 static inline void reg_block_dump(struct adapter *ap, void *buf,
1725                                   unsigned int start, unsigned int end)
1726 {
1727         u32 *p = buf + start;
1728
1729         for (; start <= end; start += sizeof(u32))
1730                 *p++ = t3_read_reg(ap, start);
1731 }
1732
1733 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1734                      void *buf)
1735 {
1736         struct port_info *pi = netdev_priv(dev);
1737         struct adapter *ap = pi->adapter;
1738
1739         /*
1740          * Version scheme:
1741          * bits 0..9: chip version
1742          * bits 10..15: chip revision
1743          * bit 31: set for PCIe cards
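              * e.g. a rev-2 PCIe card yields 3 | (2 << 10) | (1 << 31) = 0x80000803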
1744          */
1745         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1746
1747         /*
1748          * We skip the MAC statistics registers because they are clear-on-read.
1749          * Also reading multi-register stats would need to synchronize with the
1750          * periodic mac stats accumulation.  Hard to justify the complexity.
1751          */
1752         memset(buf, 0, T3_REGMAP_SIZE);
1753         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1754         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1755         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1756         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1757         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1758         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1759                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1760         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1761                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1762 }
1763
1764 static int restart_autoneg(struct net_device *dev)
1765 {
1766         struct port_info *p = netdev_priv(dev);
1767
1768         if (!netif_running(dev))
1769                 return -EAGAIN;
1770         if (p->link_config.autoneg != AUTONEG_ENABLE)
1771                 return -EINVAL;
1772         p->phy.ops->autoneg_restart(&p->phy);
1773         return 0;
1774 }
1775
1776 static int set_phys_id(struct net_device *dev,
1777                        enum ethtool_phys_id_state state)
1778 {
1779         struct port_info *pi = netdev_priv(dev);
1780         struct adapter *adapter = pi->adapter;
1781
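             /* Blink the identify LED, which is driven via GPIO0 (F_GPIO0_OUT_VAL). */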
1782         switch (state) {
1783         case ETHTOOL_ID_ACTIVE:
1784                 return 1;       /* cycle on/off once per second */
1785
1786         case ETHTOOL_ID_OFF:
1787                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1788                 break;
1789
1790         case ETHTOOL_ID_ON:
1791         case ETHTOOL_ID_INACTIVE:
1792                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1793                          F_GPIO0_OUT_VAL);
1794         }
1795
1796         return 0;
1797 }
1798
1799 static int get_link_ksettings(struct net_device *dev,
1800                               struct ethtool_link_ksettings *cmd)
1801 {
1802         struct port_info *p = netdev_priv(dev);
1803         u32 supported;
1804
1805         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1806                                                 p->link_config.supported);
1807         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1808                                                 p->link_config.advertising);
1809
1810         if (netif_carrier_ok(dev)) {
1811                 cmd->base.speed = p->link_config.speed;
1812                 cmd->base.duplex = p->link_config.duplex;
1813         } else {
1814                 cmd->base.speed = SPEED_UNKNOWN;
1815                 cmd->base.duplex = DUPLEX_UNKNOWN;
1816         }
1817
1818         ethtool_convert_link_mode_to_legacy_u32(&supported,
1819                                                 cmd->link_modes.supported);
1820
1821         cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1822         cmd->base.phy_address = p->phy.mdio.prtad;
1823         cmd->base.autoneg = p->link_config.autoneg;
1824         return 0;
1825 }
1826
1827 static int speed_duplex_to_caps(int speed, int duplex)
1828 {
1829         int cap = 0;
1830
1831         switch (speed) {
1832         case SPEED_10:
1833                 if (duplex == DUPLEX_FULL)
1834                         cap = SUPPORTED_10baseT_Full;
1835                 else
1836                         cap = SUPPORTED_10baseT_Half;
1837                 break;
1838         case SPEED_100:
1839                 if (duplex == DUPLEX_FULL)
1840                         cap = SUPPORTED_100baseT_Full;
1841                 else
1842                         cap = SUPPORTED_100baseT_Half;
1843                 break;
1844         case SPEED_1000:
1845                 if (duplex == DUPLEX_FULL)
1846                         cap = SUPPORTED_1000baseT_Full;
1847                 else
1848                         cap = SUPPORTED_1000baseT_Half;
1849                 break;
1850         case SPEED_10000:
1851                 if (duplex == DUPLEX_FULL)
1852                         cap = SUPPORTED_10000baseT_Full;
1853         }
1854         return cap;
1855 }
1856
1857 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1858                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1859                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1860                       ADVERTISED_10000baseT_Full)
1861
1862 static int set_link_ksettings(struct net_device *dev,
1863                               const struct ethtool_link_ksettings *cmd)
1864 {
1865         struct port_info *p = netdev_priv(dev);
1866         struct link_config *lc = &p->link_config;
1867         u32 advertising;
1868
1869         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1870                                                 cmd->link_modes.advertising);
1871
1872         if (!(lc->supported & SUPPORTED_Autoneg)) {
1873                 /*
1874                  * PHY offers a single speed/duplex.  See if that's what's
1875                  * being requested.
1876                  */
1877                 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1878                         u32 speed = cmd->base.speed;
1879                         int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1880                         if (lc->supported & cap)
1881                                 return 0;
1882                 }
1883                 return -EINVAL;
1884         }
1885
1886         if (cmd->base.autoneg == AUTONEG_DISABLE) {
1887                 u32 speed = cmd->base.speed;
1888                 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1889
1890                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1891                         return -EINVAL; /* 1000BASE-T requires autonegotiation */
1892                 lc->requested_speed = speed;
1893                 lc->requested_duplex = cmd->base.duplex;
1894                 lc->advertising = 0;
1895         } else {
1896                 advertising &= ADVERTISED_MASK;
1897                 advertising &= lc->supported;
1898                 if (!advertising)
1899                         return -EINVAL;
1900                 lc->requested_speed = SPEED_INVALID;
1901                 lc->requested_duplex = DUPLEX_INVALID;
1902                 lc->advertising = advertising | ADVERTISED_Autoneg;
1903         }
1904         lc->autoneg = cmd->base.autoneg;
1905         if (netif_running(dev))
1906                 t3_link_start(&p->phy, &p->mac, lc);
1907         return 0;
1908 }
1909
1910 static void get_pauseparam(struct net_device *dev,
1911                            struct ethtool_pauseparam *epause)
1912 {
1913         struct port_info *p = netdev_priv(dev);
1914
1915         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1916         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1917         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1918 }
1919
1920 static int set_pauseparam(struct net_device *dev,
1921                           struct ethtool_pauseparam *epause)
1922 {
1923         struct port_info *p = netdev_priv(dev);
1924         struct link_config *lc = &p->link_config;
1925
1926         if (epause->autoneg == AUTONEG_DISABLE)
1927                 lc->requested_fc = 0;
1928         else if (lc->supported & SUPPORTED_Autoneg)
1929                 lc->requested_fc = PAUSE_AUTONEG;
1930         else
1931                 return -EINVAL;
1932
1933         if (epause->rx_pause)
1934                 lc->requested_fc |= PAUSE_RX;
1935         if (epause->tx_pause)
1936                 lc->requested_fc |= PAUSE_TX;
1937         if (lc->autoneg == AUTONEG_ENABLE) {
1938                 if (netif_running(dev))
1939                         t3_link_start(&p->phy, &p->mac, lc);
1940         } else {
1941                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1942                 if (netif_running(dev))
1943                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1944         }
1945         return 0;
1946 }
1947
1948 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1949 {
1950         struct port_info *pi = netdev_priv(dev);
1951         struct adapter *adapter = pi->adapter;
1952         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1953
1954         e->rx_max_pending = MAX_RX_BUFFERS;
1955         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1956         e->tx_max_pending = MAX_TXQ_ENTRIES;
1957
1958         e->rx_pending = q->fl_size;
1959         e->rx_mini_pending = q->rspq_size;
1960         e->rx_jumbo_pending = q->jumbo_size;
1961         e->tx_pending = q->txq_size[0];
1962 }
1963
1964 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1965 {
1966         struct port_info *pi = netdev_priv(dev);
1967         struct adapter *adapter = pi->adapter;
1968         struct qset_params *q;
1969         int i;
1970
1971         if (e->rx_pending > MAX_RX_BUFFERS ||
1972             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1973             e->tx_pending > MAX_TXQ_ENTRIES ||
1974             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1975             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1976             e->rx_pending < MIN_FL_ENTRIES ||
1977             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1978             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1979                 return -EINVAL;
1980
1981         if (adapter->flags & FULL_INIT_DONE)
1982                 return -EBUSY;
1983
1984         q = &adapter->params.sge.qset[pi->first_qset];
1985         for (i = 0; i < pi->nqsets; ++i, ++q) {
1986                 q->rspq_size = e->rx_mini_pending;
1987                 q->fl_size = e->rx_pending;
1988                 q->jumbo_size = e->rx_jumbo_pending;
1989                 q->txq_size[0] = e->tx_pending;
1990                 q->txq_size[1] = e->tx_pending;
1991                 q->txq_size[2] = e->tx_pending;
1992         }
1993         return 0;
1994 }
1995
1996 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1997 {
1998         struct port_info *pi = netdev_priv(dev);
1999         struct adapter *adapter = pi->adapter;
2000         struct qset_params *qsp;
2001         struct sge_qset *qs;
2002         int i;
2003
2004         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2005                 return -EINVAL; /* programmed as usecs * 10; must fit NEWTIMER */
2006
2007         for (i = 0; i < pi->nqsets; i++) {
2008                 qsp = &adapter->params.sge.qset[i];
2009                 qs = &adapter->sge.qs[i];
2010                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2011                 t3_update_qset_coalesce(qs, qsp);
2012         }
2013
2014         return 0;
2015 }
2016
2017 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2018 {
2019         struct port_info *pi = netdev_priv(dev);
2020         struct adapter *adapter = pi->adapter;
2021         struct qset_params *q = adapter->params.sge.qset;
2022
2023         c->rx_coalesce_usecs = q->coalesce_usecs;
2024         return 0;
2025 }
2026
2027 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2028                       u8 *data)
2029 {
2030         struct port_info *pi = netdev_priv(dev);
2031         struct adapter *adapter = pi->adapter;
2032         int i, err = 0;
2033
2034         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2035         if (!buf)
2036                 return -ENOMEM;
2037
2038         e->magic = EEPROM_MAGIC;
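             /* Read whole 4-byte words covering [offset, offset + len). */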
2039         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2040                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2041
2042         if (!err)
2043                 memcpy(data, buf + e->offset, e->len);
2044         kfree(buf);
2045         return err;
2046 }
2047
2048 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2049                       u8 *data)
2050 {
2051         struct port_info *pi = netdev_priv(dev);
2052         struct adapter *adapter = pi->adapter;
2053         u32 aligned_offset, aligned_len;
2054         __le32 *p;
2055         u8 *buf;
2056         int err;
2057
2058         if (eeprom->magic != EEPROM_MAGIC)
2059                 return -EINVAL;
2060
2061         aligned_offset = eeprom->offset & ~3;
2062         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
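             /*
              * Example: offset = 5, len = 6 touches bytes 5..10, giving
              * aligned_offset = 4 and aligned_len = 8 (words at 4 and 8).
              */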
2063
2064         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2065                 buf = kmalloc(aligned_len, GFP_KERNEL);
2066                 if (!buf)
2067                         return -ENOMEM;
2068                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2069                 if (!err && aligned_len > 4)
2070                         err = t3_seeprom_read(adapter,
2071                                               aligned_offset + aligned_len - 4,
2072                                               (__le32 *)&buf[aligned_len - 4]);
2073                 if (err)
2074                         goto out;
2075                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2076         } else
2077                 buf = data;
2078
2079         err = t3_seeprom_wp(adapter, 0);
2080         if (err)
2081                 goto out;
2082
2083         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2084                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2085                 aligned_offset += 4;
2086         }
2087
2088         if (!err)
2089                 err = t3_seeprom_wp(adapter, 1);
2090 out:
2091         if (buf != data)
2092                 kfree(buf);
2093         return err;
2094 }
2095
2096 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2097 {
2098         wol->supported = 0;
2099         wol->wolopts = 0;
2100         memset(&wol->sopass, 0, sizeof(wol->sopass));
2101 }
2102
2103 static const struct ethtool_ops cxgb_ethtool_ops = {
2104         .get_drvinfo = get_drvinfo,
2105         .get_msglevel = get_msglevel,
2106         .set_msglevel = set_msglevel,
2107         .get_ringparam = get_sge_param,
2108         .set_ringparam = set_sge_param,
2109         .get_coalesce = get_coalesce,
2110         .set_coalesce = set_coalesce,
2111         .get_eeprom_len = get_eeprom_len,
2112         .get_eeprom = get_eeprom,
2113         .set_eeprom = set_eeprom,
2114         .get_pauseparam = get_pauseparam,
2115         .set_pauseparam = set_pauseparam,
2116         .get_link = ethtool_op_get_link,
2117         .get_strings = get_strings,
2118         .set_phys_id = set_phys_id,
2119         .nway_reset = restart_autoneg,
2120         .get_sset_count = get_sset_count,
2121         .get_ethtool_stats = get_stats,
2122         .get_regs_len = get_regs_len,
2123         .get_regs = get_regs,
2124         .get_wol = get_wol,
2125         .get_link_ksettings = get_link_ksettings,
2126         .set_link_ksettings = set_link_ksettings,
2127 };
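
     /*
      * These hooks back the standard ethtool(8) commands; e.g. "ethtool -g"
      * and "ethtool -G" reach get_sge_param()/set_sge_param() through
      * .get_ringparam/.set_ringparam.
      */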
2128
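     /*
      * Range check for ioctl parameters: a negative value means the field
      * was not supplied, so it is accepted here and skipped by callers.
      */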
2129 static int in_range(int val, int lo, int hi)
2130 {
2131         return val < 0 || (val <= hi && val >= lo);
2132 }
2133
2134 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2135 {
2136         struct port_info *pi = netdev_priv(dev);
2137         struct adapter *adapter = pi->adapter;
2138         u32 cmd;
2139         int ret;
2140
2141         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2142                 return -EFAULT;
2143
2144         switch (cmd) {
2145         case CHELSIO_SET_QSET_PARAMS:{
2146                 int i;
2147                 struct qset_params *q;
2148                 struct ch_qset_params t;
2149                 int q1 = pi->first_qset;
2150                 int nqsets = pi->nqsets;
2151
2152                 if (!capable(CAP_NET_ADMIN))
2153                         return -EPERM;
2154                 if (copy_from_user(&t, useraddr, sizeof(t)))
2155                         return -EFAULT;
2156                 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2157                         return -EINVAL;
2158                 if (t.qset_idx >= SGE_QSETS)
2159                         return -EINVAL;
2160                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2161                     !in_range(t.cong_thres, 0, 255) ||
2162                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2163                               MAX_TXQ_ENTRIES) ||
2164                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2165                               MAX_TXQ_ENTRIES) ||
2166                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2167                               MAX_CTRL_TXQ_ENTRIES) ||
2168                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2169                               MAX_RX_BUFFERS) ||
2170                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2171                               MAX_RX_JUMBO_BUFFERS) ||
2172                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2173                               MAX_RSPQ_ENTRIES))
2174                         return -EINVAL;
2175
2176                 if ((adapter->flags & FULL_INIT_DONE) &&
2177                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2178                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2179                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2180                         t.polling >= 0 || t.cong_thres >= 0))
2181                         return -EBUSY;
2182
2183                 /* Allow setting of any available qset when offload enabled */
2184                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2185                         q1 = 0;
2186                         for_each_port(adapter, i) {
2187                                 pi = adap2pinfo(adapter, i);
2188                                 nqsets += pi->first_qset + pi->nqsets;
2189                         }
2190                 }
2191
2192                 if (t.qset_idx < q1)
2193                         return -EINVAL;
2194                 if (t.qset_idx > q1 + nqsets - 1)
2195                         return -EINVAL;
2196
2197                 q = &adapter->params.sge.qset[t.qset_idx];
2198
2199                 if (t.rspq_size >= 0)
2200                         q->rspq_size = t.rspq_size;
2201                 if (t.fl_size[0] >= 0)
2202                         q->fl_size = t.fl_size[0];
2203                 if (t.fl_size[1] >= 0)
2204                         q->jumbo_size = t.fl_size[1];
2205                 if (t.txq_size[0] >= 0)
2206                         q->txq_size[0] = t.txq_size[0];
2207                 if (t.txq_size[1] >= 0)
2208                         q->txq_size[1] = t.txq_size[1];
2209                 if (t.txq_size[2] >= 0)
2210                         q->txq_size[2] = t.txq_size[2];
2211                 if (t.cong_thres >= 0)
2212                         q->cong_thres = t.cong_thres;
2213                 if (t.intr_lat >= 0) {
2214                         struct sge_qset *qs =
2215                                 &adapter->sge.qs[t.qset_idx];
2216
2217                         q->coalesce_usecs = t.intr_lat;
2218                         t3_update_qset_coalesce(qs, q);
2219                 }
2220                 if (t.polling >= 0) {
2221                         if (adapter->flags & USING_MSIX)
2222                                 q->polling = t.polling;
2223                         else {
2224                                 /* No polling with INTx for T3A */
2225                                 if (adapter->params.rev == 0 &&
2226                                         !(adapter->flags & USING_MSI))
2227                                         t.polling = 0;
2228
2229                                 for (i = 0; i < SGE_QSETS; i++) {
2230                                         q = &adapter->params.sge.qset[i];
2232                                         q->polling = t.polling;
2233                                 }
2234                         }
2235                 }
2236
2237                 if (t.lro >= 0) {
2238                         if (t.lro)
2239                                 dev->wanted_features |= NETIF_F_GRO;
2240                         else
2241                                 dev->wanted_features &= ~NETIF_F_GRO;
2242                         netdev_update_features(dev);
2243                 }
2244
2245                 break;
2246         }
2247         case CHELSIO_GET_QSET_PARAMS:{
2248                 struct qset_params *q;
2249                 struct ch_qset_params t;
2250                 int q1 = pi->first_qset;
2251                 int nqsets = pi->nqsets;
2252                 int i;
2253
2254                 if (copy_from_user(&t, useraddr, sizeof(t)))
2255                         return -EFAULT;
2256
2257                 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2258                         return -EINVAL;
2259
2260                 /* Display qsets for all ports when offload enabled */
2261                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2262                         q1 = 0;
2263                         for_each_port(adapter, i) {
2264                                 pi = adap2pinfo(adapter, i);
2265                                 nqsets = pi->first_qset + pi->nqsets;
2266                         }
2267                 }
2268
2269                 if (t.qset_idx >= nqsets)
2270                         return -EINVAL;
2271                 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2272
2273                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2274                 t.rspq_size = q->rspq_size;
2275                 t.txq_size[0] = q->txq_size[0];
2276                 t.txq_size[1] = q->txq_size[1];
2277                 t.txq_size[2] = q->txq_size[2];
2278                 t.fl_size[0] = q->fl_size;
2279                 t.fl_size[1] = q->jumbo_size;
2280                 t.polling = q->polling;
2281                 t.lro = !!(dev->features & NETIF_F_GRO);
2282                 t.intr_lat = q->coalesce_usecs;
2283                 t.cong_thres = q->cong_thres;
2284                 t.qnum = q1;
2285
2286                 if (adapter->flags & USING_MSIX)
2287                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2288                 else
2289                         t.vector = adapter->pdev->irq;
2290
2291                 if (copy_to_user(useraddr, &t, sizeof(t)))
2292                         return -EFAULT;
2293                 break;
2294         }
2295         case CHELSIO_SET_QSET_NUM:{
2296                 struct ch_reg edata;
2297                 unsigned int i, first_qset = 0, other_qsets = 0;
2298
2299                 if (!capable(CAP_NET_ADMIN))
2300                         return -EPERM;
2301                 if (adapter->flags & FULL_INIT_DONE)
2302                         return -EBUSY;
2303                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2304                         return -EFAULT;
2305                 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2306                         return -EINVAL;
2307                 if (edata.val < 1 ||
2308                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2309                         return -EINVAL;
2310
2311                 for_each_port(adapter, i)
2312                         if (adapter->port[i] && adapter->port[i] != dev)
2313                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2314
2315                 if (edata.val + other_qsets > SGE_QSETS)
2316                         return -EINVAL;
2317
2318                 pi->nqsets = edata.val;
2319
2320                 for_each_port(adapter, i)
2321                         if (adapter->port[i]) {
2322                                 pi = adap2pinfo(adapter, i);
2323                                 pi->first_qset = first_qset;
2324                                 first_qset += pi->nqsets;
2325                         }
2326                 break;
2327         }
2328         case CHELSIO_GET_QSET_NUM:{
2329                 struct ch_reg edata;
2330
2331                 memset(&edata, 0, sizeof(struct ch_reg));
2332
2333                 edata.cmd = CHELSIO_GET_QSET_NUM;
2334                 edata.val = pi->nqsets;
2335                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2336                         return -EFAULT;
2337                 break;
2338         }
2339         case CHELSIO_LOAD_FW:{
2340                 u8 *fw_data;
2341                 struct ch_mem_range t;
2342
2343                 if (!capable(CAP_SYS_RAWIO))
2344                         return -EPERM;
2345                 if (copy_from_user(&t, useraddr, sizeof(t)))
2346                         return -EFAULT;
2347                 if (t.cmd != CHELSIO_LOAD_FW)
2348                         return -EINVAL;
2349                 /* Check t.len sanity? */
2350                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2351                 if (IS_ERR(fw_data))
2352                         return PTR_ERR(fw_data);
2353
2354                 ret = t3_load_fw(adapter, fw_data, t.len);
2355                 kfree(fw_data);
2356                 if (ret)
2357                         return ret;
2358                 break;
2359         }
2360         case CHELSIO_SETMTUTAB:{
2361                 struct ch_mtus m;
2362                 int i;
2363
2364                 if (!is_offload(adapter))
2365                         return -EOPNOTSUPP;
2366                 if (!capable(CAP_NET_ADMIN))
2367                         return -EPERM;
2368                 if (offload_running(adapter))
2369                         return -EBUSY;
2370                 if (copy_from_user(&m, useraddr, sizeof(m)))
2371                         return -EFAULT;
2372                 if (m.cmd != CHELSIO_SETMTUTAB)
2373                         return -EINVAL;
2374                 if (m.nmtus != NMTUS)
2375                         return -EINVAL;
2376                 if (m.mtus[0] < 81)     /* accommodate SACK */
2377                         return -EINVAL;
2378
2379                 /* MTUs must be in ascending order */
2380                 for (i = 1; i < NMTUS; ++i)
2381                         if (m.mtus[i] < m.mtus[i - 1])
2382                                 return -EINVAL;
2383
2384                 memcpy(adapter->params.mtus, m.mtus,
2385                         sizeof(adapter->params.mtus));
2386                 break;
2387         }
2388         case CHELSIO_GET_PM:{
2389                 struct tp_params *p = &adapter->params.tp;
2390                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2391
2392                 if (!is_offload(adapter))
2393                         return -EOPNOTSUPP;
2394                 m.tx_pg_sz = p->tx_pg_size;
2395                 m.tx_num_pg = p->tx_num_pgs;
2396                 m.rx_pg_sz = p->rx_pg_size;
2397                 m.rx_num_pg = p->rx_num_pgs;
2398                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2399                 if (copy_to_user(useraddr, &m, sizeof(m)))
2400                         return -EFAULT;
2401                 break;
2402         }
2403         case CHELSIO_SET_PM:{
2404                 struct ch_pm m;
2405                 struct tp_params *p = &adapter->params.tp;
2406
2407                 if (!is_offload(adapter))
2408                         return -EOPNOTSUPP;
2409                 if (!capable(CAP_NET_ADMIN))
2410                         return -EPERM;
2411                 if (adapter->flags & FULL_INIT_DONE)
2412                         return -EBUSY;
2413                 if (copy_from_user(&m, useraddr, sizeof(m)))
2414                         return -EFAULT;
2415                 if (m.cmd != CHELSIO_SET_PM)
2416                         return -EINVAL;
2417                 if (!is_power_of_2(m.rx_pg_sz) ||
2418                         !is_power_of_2(m.tx_pg_sz))
2419                         return -EINVAL; /* not power of 2 */
2420                 if (!(m.rx_pg_sz & 0x14000))
2421                         return -EINVAL; /* not 16KB or 64KB */
2422                 if (!(m.tx_pg_sz & 0x1554000))
2423                         return -EINVAL; /* not a power of 4 between 16KB and 16MB */
2424                 if (m.tx_num_pg == -1)
2425                         m.tx_num_pg = p->tx_num_pgs;
2426                 if (m.rx_num_pg == -1)
2427                         m.rx_num_pg = p->rx_num_pgs;
2428                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2429                         return -EINVAL;
2430                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2431                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2432                         return -EINVAL;
2433                 p->rx_pg_size = m.rx_pg_sz;
2434                 p->tx_pg_size = m.tx_pg_sz;
2435                 p->rx_num_pgs = m.rx_num_pg;
2436                 p->tx_num_pgs = m.tx_num_pg;
2437                 break;
2438         }
2439         case CHELSIO_GET_MEM:{
2440                 struct ch_mem_range t;
2441                 struct mc7 *mem;
2442                 u64 buf[32];
2443
2444                 if (!is_offload(adapter))
2445                         return -EOPNOTSUPP;
2446                 if (!capable(CAP_NET_ADMIN))
2447                         return -EPERM;
2448                 if (!(adapter->flags & FULL_INIT_DONE))
2449                         return -EIO;    /* need the memory controllers */
2450                 if (copy_from_user(&t, useraddr, sizeof(t)))
2451                         return -EFAULT;
2452                 if (t.cmd != CHELSIO_GET_MEM)
2453                         return -EINVAL;
2454                 if ((t.addr & 7) || (t.len & 7))
2455                         return -EINVAL;
2456                 if (t.mem_id == MEM_CM)
2457                         mem = &adapter->cm;
2458                 else if (t.mem_id == MEM_PMRX)
2459                         mem = &adapter->pmrx;
2460                 else if (t.mem_id == MEM_PMTX)
2461                         mem = &adapter->pmtx;
2462                 else
2463                         return -EINVAL;
2464
2465                 /*
2466                  * Version scheme:
2467                  * bits 0..9: chip version
2468                  * bits 10..15: chip revision
2469                  */
2470                 t.version = 3 | (adapter->params.rev << 10);
2471                 if (copy_to_user(useraddr, &t, sizeof(t)))
2472                         return -EFAULT;
2473
2474                 /*
2475                  * Read 256 bytes at a time as len can be large and we don't
2476                  * want to use huge intermediate buffers.
2477                  */
2478                 useraddr += sizeof(t);  /* advance to start of buffer */
2479                 while (t.len) {
2480                         unsigned int chunk =
2481                                 min_t(unsigned int, t.len, sizeof(buf));
2482
2483                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2484                                              buf);
2486                         if (ret)
2487                                 return ret;
2488                         if (copy_to_user(useraddr, buf, chunk))
2489                                 return -EFAULT;
2490                         useraddr += chunk;
2491                         t.addr += chunk;
2492                         t.len -= chunk;
2493                 }
2494                 break;
2495         }
2496         case CHELSIO_SET_TRACE_FILTER:{
2497                 struct ch_trace t;
2498                 const struct trace_params *tp;
2499
2500                 if (!capable(CAP_NET_ADMIN))
2501                         return -EPERM;
2502                 if (!offload_running(adapter))
2503                         return -EAGAIN;
2504                 if (copy_from_user(&t, useraddr, sizeof(t)))
2505                         return -EFAULT;
2506                 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2507                         return -EINVAL;
2508
2509                 tp = (const struct trace_params *)&t.sip;
2510                 if (t.config_tx)
2511                         t3_config_trace_filter(adapter, tp, 0,
2512                                                 t.invert_match,
2513                                                 t.trace_tx);
2514                 if (t.config_rx)
2515                         t3_config_trace_filter(adapter, tp, 1,
2516                                                 t.invert_match,
2517                                                 t.trace_rx);
2518                 break;
2519         }
2520         default:
2521                 return -EOPNOTSUPP;
2522         }
2523         return 0;
2524 }
2525
2526 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2527 {
2528         struct mii_ioctl_data *data = if_mii(req);
2529         struct port_info *pi = netdev_priv(dev);
2530         struct adapter *adapter = pi->adapter;
2531
2532         switch (cmd) {
2533         case SIOCGMIIREG:
2534         case SIOCSMIIREG:
2535                 /* Convert phy_id from older PRTAD/DEVAD format */
2536                 if (is_10G(adapter) &&
2537                     !mdio_phy_id_is_c45(data->phy_id) &&
2538                     (data->phy_id & 0x1f00) &&
2539                     !(data->phy_id & 0xe0e0))
2540                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2541                                                        data->phy_id & 0x1f);
2542                 /* FALLTHRU */
2543         case SIOCGMIIPHY:
2544                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2545         case SIOCCHIOCTL:
2546                 return cxgb_extension_ioctl(dev, req->ifr_data);
2547         default:
2548                 return -EOPNOTSUPP;
2549         }
2550 }
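
     /*
      * Illustrative only (not part of the driver): userspace reaches
      * cxgb_extension_ioctl() through the private SIOCCHIOCTL ioctl,
      * passing one of the ch_* structures via ifr_data, e.g.:
      *
      *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
      *	struct ifreq ifr;
      *
      *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
      *	ifr.ifr_data = (void *)&edata;
      *	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
      *		printf("%u qsets\n", edata.val);
      *
      * "eth0" and fd are assumptions; fd can be any AF_INET datagram socket.
      */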
2551
2552 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2553 {
2554         struct port_info *pi = netdev_priv(dev);
2555         struct adapter *adapter = pi->adapter;
2556         int ret;
2557
2558         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2559                 return ret;
2560         dev->mtu = new_mtu;
2561         init_port_mtus(adapter);
2562         if (adapter->params.rev == 0 && offload_running(adapter))
2563                 t3_load_mtus(adapter, adapter->params.mtus,
2564                              adapter->params.a_wnd, adapter->params.b_wnd,
2565                              adapter->port[0]->mtu);
2566         return 0;
2567 }
2568
2569 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2570 {
2571         struct port_info *pi = netdev_priv(dev);
2572         struct adapter *adapter = pi->adapter;
2573         struct sockaddr *addr = p;
2574
2575         if (!is_valid_ether_addr(addr->sa_data))
2576                 return -EADDRNOTAVAIL;
2577
2578         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2579         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2580         if (offload_running(adapter))
2581                 write_smt_entry(adapter, pi->port_id);
2582         return 0;
2583 }
2584
2585 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2586         netdev_features_t features)
2587 {
2588         /*
2589          * Since there is no support for separate rx/tx vlan accel
2590          * enable/disable make sure tx flag is always in same state as rx.
2591          */
2592         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2593                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2594         else
2595                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2596
2597         return features;
2598 }
2599
2600 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2601 {
2602         netdev_features_t changed = dev->features ^ features;
2603
2604         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2605                 cxgb_vlan_mode(dev, features);
2606
2607         return 0;
2608 }
2609
2610 #ifdef CONFIG_NET_POLL_CONTROLLER
2611 static void cxgb_netpoll(struct net_device *dev)
2612 {
2613         struct port_info *pi = netdev_priv(dev);
2614         struct adapter *adapter = pi->adapter;
2615         int qidx;
2616
2617         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2618                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2619                 void *source;
2620
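                     /* MSI-X handlers take the qset as their IRQ cookie;
                      * INTx/MSI handlers take the whole adapter. */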
2621                 if (adapter->flags & USING_MSIX)
2622                         source = qs;
2623                 else
2624                         source = adapter;
2625
2626                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2627         }
2628 }
2629 #endif
2630
2631 /*
2632  * Periodic accumulation of MAC statistics.
2633  */
2634 static void mac_stats_update(struct adapter *adapter)
2635 {
2636         int i;
2637
2638         for_each_port(adapter, i) {
2639                 struct net_device *dev = adapter->port[i];
2640                 struct port_info *p = netdev_priv(dev);
2641
2642                 if (netif_running(dev)) {
2643                         spin_lock(&adapter->stats_lock);
2644                         t3_mac_update_stats(&p->mac);
2645                         spin_unlock(&adapter->stats_lock);
2646                 }
2647         }
2648 }
2649
2650 static void check_link_status(struct adapter *adapter)
2651 {
2652         int i;
2653
2654         for_each_port(adapter, i) {
2655                 struct net_device *dev = adapter->port[i];
2656                 struct port_info *p = netdev_priv(dev);
2657                 int link_fault;
2658
2659                 spin_lock_irq(&adapter->work_lock);
2660                 link_fault = p->link_fault;
2661                 spin_unlock_irq(&adapter->work_lock);
2662
2663                 if (link_fault) {
2664                         t3_link_fault(adapter, i);
2665                         continue;
2666                 }
2667
2668                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2669                         t3_xgm_intr_disable(adapter, i);
2670                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2671
2672                         t3_link_changed(adapter, i);
2673                         t3_xgm_intr_enable(adapter, i);
2674                 }
2675         }
2676 }
2677
2678 static void check_t3b2_mac(struct adapter *adapter)
2679 {
2680         int i;
2681
2682         if (!rtnl_trylock())    /* synchronize with ifdown */
2683                 return;
2684
2685         for_each_port(adapter, i) {
2686                 struct net_device *dev = adapter->port[i];
2687                 struct port_info *p = netdev_priv(dev);
2688                 int status;
2689
2690                 if (!netif_running(dev))
2691                         continue;
2692
2693                 status = 0;
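                     /* Watchdog verdicts (inferred from the handling below):
                      * 1 = TX enable was toggled to clear a hang,
                      * 2 = the MAC was reset and must be reprogrammed. */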
2694                 if (netif_running(dev) && netif_carrier_ok(dev))
2695                         status = t3b2_mac_watchdog_task(&p->mac);
2696                 if (status == 1)
2697                         p->mac.stats.num_toggled++;
2698                 else if (status == 2) {
2699                         struct cmac *mac = &p->mac;
2700
2701                         t3_mac_set_mtu(mac, dev->mtu);
2702                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2703                         cxgb_set_rxmode(dev);
2704                         t3_link_start(&p->phy, mac, &p->link_config);
2705                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2706                         t3_port_intr_enable(adapter, p->port_id);
2707                         p->mac.stats.num_resets++;
2708                 }
2709         }
2710         rtnl_unlock();
2711 }
2712
2713
2714 static void t3_adap_check_task(struct work_struct *work)
2715 {
2716         struct adapter *adapter = container_of(work, struct adapter,
2717                                                adap_check_task.work);
2718         const struct adapter_params *p = &adapter->params;
2719         int port;
2720         unsigned int v, status, reset;
2721
2722         adapter->check_task_cnt++;
2723
2724         check_link_status(adapter);
2725
2726         /* Accumulate MAC stats if needed (linkpoll_period is in units of 0.1s) */
2727         if (!p->linkpoll_period ||
2728             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2729             p->stats_update_period) {
2730                 mac_stats_update(adapter);
2731                 adapter->check_task_cnt = 0;
2732         }
2733
2734         if (p->rev == T3_REV_B2)
2735                 check_t3b2_mac(adapter);
2736
2737         /*
2738          * Scan the XGMACs for conditions which we want to monitor by
2739          * periodic polling rather than via an interrupt, because such
2740          * conditions would otherwise flood the system with interrupts and
2741          * we only really need to know that they are "happening".  For each
2742          * condition we count its detections and reset the count for the
2743          * next polling loop.
2744          */
2745         for_each_port(adapter, port) {
2746                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2747                 u32 cause;
2748
2749                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2750                 reset = 0;
2751                 if (cause & F_RXFIFO_OVERFLOW) {
2752                         mac->stats.rx_fifo_ovfl++;
2753                         reset |= F_RXFIFO_OVERFLOW;
2754                 }
2755
2756                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2757         }
2758
2759         /*
2760          * We do the same as above for FL_EMPTY interrupts.
2761          */
2762         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2763         reset = 0;
2764
2765         if (status & F_FLEMPTY) {
2766                 struct sge_qset *qs = &adapter->sge.qs[0];
2767                 int i = 0;
2768
2769                 reset |= F_FLEMPTY;
2770
2771                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2772                     0xffff;
2773
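                     /*
                      * v has one bit per free list, two consecutive bits per
                      * qset: count each empty FL and step to the next qset
                      * after every second bit.
                      */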
2774                 while (v) {
2775                         qs->fl[i].empty += (v & 1);
2776                         if (i)
2777                                 qs++;
2778                         i ^= 1;
2779                         v >>= 1;
2780                 }
2781         }
2782
2783         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2784
2785         /* Schedule the next check update if any port is active. */
2786         spin_lock_irq(&adapter->work_lock);
2787         if (adapter->open_device_map & PORT_MASK)
2788                 schedule_chk_task(adapter);
2789         spin_unlock_irq(&adapter->work_lock);
2790 }
2791
2792 static void db_full_task(struct work_struct *work)
2793 {
2794         struct adapter *adapter = container_of(work, struct adapter,
2795                                                db_full_task);
2796
2797         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2798 }
2799
2800 static void db_empty_task(struct work_struct *work)
2801 {
2802         struct adapter *adapter = container_of(work, struct adapter,
2803                                                db_empty_task);
2804
2805         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2806 }
2807
2808 static void db_drop_task(struct work_struct *work)
2809 {
2810         struct adapter *adapter = container_of(work, struct adapter,
2811                                                db_drop_task);
2812         unsigned long delay = 1000;
2813         unsigned short r;
2814
2815         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2816
2817         /*
2818          * Sleep a while before ringing the driver qset doorbells.
2819          * delay = 1000 + (r & 1023), i.e. between 1000 and 2023 usecs.
2820          */
2821         get_random_bytes(&r, 2);
2822         delay += r & 1023;
2823         set_current_state(TASK_UNINTERRUPTIBLE);
2824         schedule_timeout(usecs_to_jiffies(delay));
2825         ring_dbs(adapter);
2826 }
2827
2828 /*
2829  * Processes external (PHY) interrupts in process context.
2830  */
2831 static void ext_intr_task(struct work_struct *work)
2832 {
2833         struct adapter *adapter = container_of(work, struct adapter,
2834                                                ext_intr_handler_task);
2835         int i;
2836
2837         /* Disable link fault interrupts */
2838         for_each_port(adapter, i) {
2839                 struct net_device *dev = adapter->port[i];
2840                 struct port_info *p = netdev_priv(dev);
2841
2842                 t3_xgm_intr_disable(adapter, i);
2843                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2844         }
2845
2846         /* Handle PHY interrupts, then re-enable link fault interrupts */
2847         t3_phy_intr_handler(adapter);
2848
2849         for_each_port(adapter, i)
2850                 t3_xgm_intr_enable(adapter, i);
2851
2852         /* Now reenable external interrupts */
2853         spin_lock_irq(&adapter->work_lock);
2854         if (adapter->slow_intr_mask) {
2855                 adapter->slow_intr_mask |= F_T3DBG;
2856                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2857                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2858                              adapter->slow_intr_mask);
2859         }
2860         spin_unlock_irq(&adapter->work_lock);
2861 }
2862
2863 /*
2864  * Interrupt-context handler for external (PHY) interrupts.
2865  */
2866 void t3_os_ext_intr_handler(struct adapter *adapter)
2867 {
2868         /*
2869          * Schedule a task to handle external interrupts as they may be slow
2870          * and we use a mutex to protect MDIO registers.  We disable PHY
2871          * interrupts in the meantime and let the task reenable them when
2872          * it's done.
2873          */
2874         spin_lock(&adapter->work_lock);
2875         if (adapter->slow_intr_mask) {
2876                 adapter->slow_intr_mask &= ~F_T3DBG;
2877                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2878                              adapter->slow_intr_mask);
2879                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2880         }
2881         spin_unlock(&adapter->work_lock);
2882 }
2883
2884 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2885 {
2886         struct net_device *netdev = adapter->port[port_id];
2887         struct port_info *pi = netdev_priv(netdev);
2888
2889         spin_lock(&adapter->work_lock);
2890         pi->link_fault = 1;
2891         spin_unlock(&adapter->work_lock);
2892 }
2893
2894 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2895 {
2896         int i, ret = 0;
2897
2898         if (is_offload(adapter) &&
2899             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2900                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2901                 offload_close(&adapter->tdev);
2902         }
2903
2904         /* Stop all ports */
2905         for_each_port(adapter, i) {
2906                 struct net_device *netdev = adapter->port[i];
2907
2908                 if (netif_running(netdev))
2909                         __cxgb_close(netdev, on_wq);
2910         }
2911
2912         /* Stop SGE timers */
2913         t3_stop_sge_timers(adapter);
2914
2915         adapter->flags &= ~FULL_INIT_DONE;
2916
2917         if (reset)
2918                 ret = t3_reset_adapter(adapter);
2919
2920         pci_disable_device(adapter->pdev);
2921
2922         return ret;
2923 }
2924
2925 static int t3_reenable_adapter(struct adapter *adapter)
2926 {
2927         if (pci_enable_device(adapter->pdev)) {
2928                 dev_err(&adapter->pdev->dev,
2929                         "Cannot re-enable PCI device after reset.\n");
2930                 goto err;
2931         }
2932         pci_set_master(adapter->pdev);
2933         pci_restore_state(adapter->pdev);
2934         pci_save_state(adapter->pdev);
2935
2936         /* Free sge resources */
2937         t3_free_sge_resources(adapter);
2938
2939         if (t3_replay_prep_adapter(adapter))
2940                 goto err;
2941
2942         return 0;
2943 err:
2944         return -1;
2945 }
2946
2947 static void t3_resume_ports(struct adapter *adapter)
2948 {
2949         int i;
2950
2951         /* Restart the ports */
2952         for_each_port(adapter, i) {
2953                 struct net_device *netdev = adapter->port[i];
2954
2955                 if (netif_running(netdev)) {
2956                         if (cxgb_open(netdev)) {
2957                                 dev_err(&adapter->pdev->dev,
2958                                         "can't bring device back up after reset\n");
2960                                 continue;
2961                         }
2962                 }
2963         }
2964
2965         if (is_offload(adapter) && !ofld_disable)
2966                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2967 }
2968
2969 /*
2970  * Process a fatal error: bring the ports down, reset the chip, then
2971  * bring the ports back up.
2972  */
2973 static void fatal_error_task(struct work_struct *work)
2974 {
2975         struct adapter *adapter = container_of(work, struct adapter,
2976                                                fatal_error_handler_task);
2977         int err = 0;
2978
2979         rtnl_lock();
2980         err = t3_adapter_error(adapter, 1, 1);
2981         if (!err)
2982                 err = t3_reenable_adapter(adapter);
2983         if (!err)
2984                 t3_resume_ports(adapter);
2985
2986         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2987         rtnl_unlock();
2988 }
2989
2990 void t3_fatal_err(struct adapter *adapter)
2991 {
2992         unsigned int fw_status[4];
2993
2994         if (adapter->flags & FULL_INIT_DONE) {
2995                 t3_sge_stop(adapter);
2996                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2997                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2998                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2999                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3000
3001                 spin_lock(&adapter->work_lock);
3002                 t3_intr_disable(adapter);
3003                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3004                 spin_unlock(&adapter->work_lock);
3005         }
3006         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
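        /*
         * Dump the firmware's status words.  Offset 0xa0 in the CIM control
         * block appears to be where the FW posts them; that is inferred from
         * this usage rather than from a named constant.
         */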
3007         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3008                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3009                          fw_status[0], fw_status[1],
3010                          fw_status[2], fw_status[3]);
3011 }
3012
3013 /**
3014  * t3_io_error_detected - called when PCI error is detected
3015  * @pdev: Pointer to PCI device
3016  * @state: the current PCI connection state
3017  *
3018  * This function is called after a PCI bus error affecting
3019  * this device has been detected.
3020  */
3021 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3022                                              pci_channel_state_t state)
3023 {
3024         struct adapter *adapter = pci_get_drvdata(pdev);
3025
3026         if (state == pci_channel_io_perm_failure)
3027                 return PCI_ERS_RESULT_DISCONNECT;
3028
3029         t3_adapter_error(adapter, 0, 0);
3030
3031         /* Request a slot reset. */
3032         return PCI_ERS_RESULT_NEED_RESET;
3033 }
3034
3035 /**
3036  * t3_io_slot_reset - called after the PCI bus has been reset.
3037  * @pdev: Pointer to PCI device
3038  *
3039  * Restart the card from scratch, as if from a cold boot.
3040  */
3041 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3042 {
3043         struct adapter *adapter = pci_get_drvdata(pdev);
3044
3045         if (!t3_reenable_adapter(adapter))
3046                 return PCI_ERS_RESULT_RECOVERED;
3047
3048         return PCI_ERS_RESULT_DISCONNECT;
3049 }
3050
3051 /**
3052  * t3_io_resume - called when traffic can start flowing again.
3053  * @pdev: Pointer to PCI device
3054  *
3055  * This callback is called when the error recovery driver tells us that
3056  * it's OK to resume normal operation.
3057  */
3058 static void t3_io_resume(struct pci_dev *pdev)
3059 {
3060         struct adapter *adapter = pci_get_drvdata(pdev);
3061
3062         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3063                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3064
3065         rtnl_lock();
3066         t3_resume_ports(adapter);
3067         rtnl_unlock();
3068 }
3069
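/*
 * PCI error recovery entry points.  The PCI error recovery core calls them
 * in order: error_detected() when a bus error is seen, slot_reset() after
 * the slot has been reset, and resume() once traffic may start flowing
 * again.
 */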
3070 static const struct pci_error_handlers t3_err_handler = {
3071         .error_detected = t3_io_error_detected,
3072         .slot_reset = t3_io_slot_reset,
3073         .resume = t3_io_resume,
3074 };
3075
3076 /*
3077  * Set the number of queue sets (qsets) per port based on the number of
3078  * CPUs and the number of ports, without exceeding the number of qsets
3079  * the hardware provides.
3080  */
3081 static void set_nqsets(struct adapter *adap)
3082 {
3083         int i, j = 0;
3084         int num_cpus = netif_get_num_default_rss_queues();
3085         int hwports = adap->params.nports;
3086         int nqsets = adap->msix_nvectors - 1;
3087
3088         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3089                 if (hwports == 2 &&
3090                     (hwports * nqsets > SGE_QSETS ||
3091                      num_cpus >= nqsets / hwports))
3092                         nqsets /= hwports;
3093                 if (nqsets > num_cpus)
3094                         nqsets = num_cpus;
3095                 if (nqsets < 1 || hwports == 4)
3096                         nqsets = 1;
3097         } else {
3098                 nqsets = 1;
        }
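        /*
         * Worked example, assuming SGE_QSETS == 8: a two-port adapter that
         * was granted 9 MSI-X vectors starts with nqsets = 8; 2 * 8 exceeds
         * SGE_QSETS, so each port drops to 4 qsets, which is then further
         * capped by the default RSS queue count.
         */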
3099
3100         for_each_port(adap, i) {
3101                 struct port_info *pi = adap2pinfo(adap, i);
3102
3103                 pi->first_qset = j;
3104                 pi->nqsets = nqsets;
3105                 j = pi->first_qset + nqsets;
3106
3107                 dev_info(&adap->pdev->dev,
3108                          "Port %d using %d queue sets.\n", i, nqsets);
3109         }
3110 }
3111
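/*
 * Request MSI-X vectors: one per queue set plus one for slow-path and
 * async events (vector 0), accepting as few as one vector per port plus
 * the extra one.
 */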
3112 static int cxgb_enable_msix(struct adapter *adap)
3113 {
3114         struct msix_entry entries[SGE_QSETS + 1];
3115         int vectors;
3116         int i;
3117
3118         vectors = ARRAY_SIZE(entries);
3119         for (i = 0; i < vectors; ++i)
3120                 entries[i].entry = i;
3121
3122         vectors = pci_enable_msix_range(adap->pdev, entries,
3123                                         adap->params.nports + 1, vectors);
3124         if (vectors < 0)
3125                 return vectors;
3126
3127         for (i = 0; i < vectors; ++i)
3128                 adap->msix_info[i].vec = entries[i].vector;
3129         adap->msix_nvectors = vectors;
3130
3131         return 0;
3132 }
3133
3134 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3135 {
3136         static const char *pci_variant[] = {
3137                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3138         };
3139
3140         int i;
3141         char buf[80];
3142
3143         if (is_pcie(adap))
3144                 snprintf(buf, sizeof(buf), "%s x%d",
3145                          pci_variant[adap->params.pci.variant],
3146                          adap->params.pci.width);
3147         else
3148                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3149                          pci_variant[adap->params.pci.variant],
3150                          adap->params.pci.speed, adap->params.pci.width);
3151
3152         for_each_port(adap, i) {
3153                 struct net_device *dev = adap->port[i];
3154                 const struct port_info *pi = netdev_priv(dev);
3155
3156                 if (!test_bit(i, &adap->registered_device_map))
3157                         continue;
3158                 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3159                             ai->desc, pi->phy.desc,
3160                             is_offload(adap) ? "R" : "", adap->params.rev, buf,
3161                             (adap->flags & USING_MSIX) ? " MSI-X" :
3162                             (adap->flags & USING_MSI) ? " MSI" : "");
3163                 if (adap->name == dev->name && adap->params.vpd.mclk)
3164                         pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3165                                adap->name, t3_mc7_size(&adap->cm) >> 20,
3166                                t3_mc7_size(&adap->pmtx) >> 20,
3167                                t3_mc7_size(&adap->pmrx) >> 20,
3168                                adap->params.vpd.sn);
3169         }
3170 }
3171
3172 static const struct net_device_ops cxgb_netdev_ops = {
3173         .ndo_open               = cxgb_open,
3174         .ndo_stop               = cxgb_close,
3175         .ndo_start_xmit         = t3_eth_xmit,
3176         .ndo_get_stats          = cxgb_get_stats,
3177         .ndo_validate_addr      = eth_validate_addr,
3178         .ndo_set_rx_mode        = cxgb_set_rxmode,
3179         .ndo_do_ioctl           = cxgb_ioctl,
3180         .ndo_change_mtu         = cxgb_change_mtu,
3181         .ndo_set_mac_address    = cxgb_set_mac_addr,
3182         .ndo_fix_features       = cxgb_fix_features,
3183         .ndo_set_features       = cxgb_set_features,
3184 #ifdef CONFIG_NET_POLL_CONTROLLER
3185         .ndo_poll_controller    = cxgb_netpoll,
3186 #endif
3187 };
3188
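/*
 * Derive a MAC address for the iSCSI interface from the port's primary MAC
 * by setting a high bit in octet 3, so the two addresses stay related but
 * distinct.
 */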
3189 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3190 {
3191         struct port_info *pi = netdev_priv(dev);
3192
3193         memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3194         pi->iscsic.mac_addr[3] |= 0x80;
3195 }
3196
3197 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3198 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3199                         NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3200 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3201 {
3202         int i, err, pci_using_dac = 0;
3203         resource_size_t mmio_start, mmio_len;
3204         const struct adapter_info *ai;
3205         struct adapter *adapter = NULL;
3206         struct port_info *pi;
3207
3208         pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3209
3210         if (!cxgb3_wq) {
3211                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3212                 if (!cxgb3_wq) {
3213                         pr_err("cannot initialize work queue\n");
3214                         return -ENOMEM;
3215                 }
3216         }
3217
3218         err = pci_enable_device(pdev);
3219         if (err) {
3220                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3221                 goto out;
3222         }
3223
3224         err = pci_request_regions(pdev, DRV_NAME);
3225         if (err) {
3226                 /* Just info, some other driver may have claimed the device. */
3227                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3228                 goto out_disable_device;
3229         }
3230
3231         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3232                 pci_using_dac = 1;
3233                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3234                 if (err) {
3235                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3236                                "coherent allocations\n");
3237                         goto out_release_regions;
3238                 }
3239         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3240                 dev_err(&pdev->dev, "no usable DMA configuration\n");
3241                 goto out_release_regions;
3242         }
3243
3244         pci_set_master(pdev);
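        /*
         * Snapshot config space now; the PCI error handlers rely on this
         * saved state to revive the device after a slot reset (see
         * t3_reenable_adapter()).
         */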
3245         pci_save_state(pdev);
3246
3247         mmio_start = pci_resource_start(pdev, 0);
3248         mmio_len = pci_resource_len(pdev, 0);
3249         ai = t3_get_adapter_info(ent->driver_data);
3250
3251         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3252         if (!adapter) {
3253                 err = -ENOMEM;
3254                 goto out_release_regions;
3255         }
3256
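        /*
         * Pre-allocate an skb large enough for a CPL_SET_TCB_FIELD message
         * so the offload code can still make forward progress when a normal
         * allocation fails under memory pressure.
         */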
3257         adapter->nofail_skb =
3258                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3259         if (!adapter->nofail_skb) {
3260                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3261                 err = -ENOMEM;
3262                 goto out_free_adapter;
3263         }
3264
3265         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3266         if (!adapter->regs) {
3267                 dev_err(&pdev->dev, "cannot map device registers\n");
3268                 err = -ENOMEM;
3269                 goto out_free_adapter_nofail;
3270         }
3271
3272         adapter->pdev = pdev;
3273         adapter->name = pci_name(pdev);
3274         adapter->msg_enable = dflt_msg_enable;
3275         adapter->mmio_len = mmio_len;
3276
3277         mutex_init(&adapter->mdio_lock);
3278         spin_lock_init(&adapter->work_lock);
3279         spin_lock_init(&adapter->stats_lock);
3280
3281         INIT_LIST_HEAD(&adapter->adapter_list);
3282         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3283         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3284
3285         INIT_WORK(&adapter->db_full_task, db_full_task);
3286         INIT_WORK(&adapter->db_empty_task, db_empty_task);
3287         INIT_WORK(&adapter->db_drop_task, db_drop_task);
3288
3289         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3290
3291         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3292                 struct net_device *netdev;
3293
3294                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3295                 if (!netdev) {
3296                         err = -ENOMEM;
3297                         goto out_free_dev;
3298                 }
3299
3300                 SET_NETDEV_DEV(netdev, &pdev->dev);
3301
3302                 adapter->port[i] = netdev;
3303                 pi = netdev_priv(netdev);
3304                 pi->adapter = adapter;
3305                 pi->port_id = i;
3306                 netif_carrier_off(netdev);
3307                 netdev->irq = pdev->irq;
3308                 netdev->mem_start = mmio_start;
3309                 netdev->mem_end = mmio_start + mmio_len - 1;
3310                 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3311                         NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3312                 netdev->features |= netdev->hw_features |
3313                                     NETIF_F_HW_VLAN_CTAG_TX;
3314                 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3315                 if (pci_using_dac)
3316                         netdev->features |= NETIF_F_HIGHDMA;
3317
3318                 netdev->netdev_ops = &cxgb_netdev_ops;
3319                 netdev->ethtool_ops = &cxgb_ethtool_ops;
3320                 netdev->min_mtu = 81;
3321                 netdev->max_mtu = ETH_MAX_MTU;
3322                 netdev->dev_port = pi->port_id;
3323         }
3324
3325         pci_set_drvdata(pdev, adapter);
3326         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3327                 err = -ENODEV;
3328                 goto out_free_dev;
3329         }
3330
3331         /*
3332          * The card is now ready to go.  If any errors occur during device
3333          * registration we do not fail the whole card but rather proceed only
3334          * with the ports we manage to register successfully.  However we must
3335  * with the ports we manage to register successfully.  However, we must
3336          */
3337         for_each_port(adapter, i) {
3338                 err = register_netdev(adapter->port[i]);
3339                 if (err)
3340                         dev_warn(&pdev->dev,
3341                                  "cannot register net device %s, skipping\n",
3342                                  adapter->port[i]->name);
3343                 else {
3344                         /*
3345                          * Change the name we use for messages to the name of
3346                          * the first successfully registered interface.
3347                          */
3348                         if (!adapter->registered_device_map)
3349                                 adapter->name = adapter->port[i]->name;
3350
3351                         __set_bit(i, &adapter->registered_device_map);
3352                 }
3353         }
3354         if (!adapter->registered_device_map) {
3355                 dev_err(&pdev->dev, "could not register any net devices\n");
3356                 goto out_free_dev;
3357         }
3358
3359         for_each_port(adapter, i)
3360                 cxgb3_init_iscsi_mac(adapter->port[i]);
3361
3362         /* The driver is ready; reflect that on the LEDs */
3363         t3_led_ready(adapter);
3364
3365         if (is_offload(adapter)) {
3366                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3367                 cxgb3_adapter_ofld(adapter);
3368         }
3369
3370         /*
              * Pick the interrupt mode: "msi" > 1 requests MSI-X and
              * "msi" == 1 requests MSI; fall back to legacy INTx if the
              * requested mode cannot be enabled.
              */
3371         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3372                 adapter->flags |= USING_MSIX;
3373         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3374                 adapter->flags |= USING_MSI;
3375
3376         set_nqsets(adapter);
3377
3378         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3379                                  &cxgb3_attr_group);
3380         if (err) {
3381                 dev_err(&pdev->dev, "cannot create sysfs group\n");
3382                 goto out_close_led;
3383         }
3384
3385         print_port_info(adapter, ai);
3386         return 0;
3387
3388 out_close_led:
3389         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3390
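/*
 * NOTE: on the sysfs failure path above, ports that were successfully
 * registered reach out_free_dev and are freed without a matching
 * unregister_netdev().  As far as I can tell that is a latent bug; it is
 * flagged rather than fixed here because a full unwind would also have to
 * undo the offload setup.
 */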
3391 out_free_dev:
3392         iounmap(adapter->regs);
3393         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3394                 if (adapter->port[i])
3395                         free_netdev(adapter->port[i]);
3396
3397 out_free_adapter_nofail:
3398         kfree_skb(adapter->nofail_skb);
3399
3400 out_free_adapter:
3401         kfree(adapter);
3402
3403 out_release_regions:
3404         pci_release_regions(pdev);
3405 out_disable_device:
3406         pci_disable_device(pdev);
3407 out:
3408         return err;
3409 }
3410
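/*
 * PCI remove: tear everything down in roughly the reverse order of
 * init_one(), stopping the SGE first, then dropping sysfs and offload
 * state, unregistering and freeing the ports, and finally releasing the
 * PCI resources.
 */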
3411 static void remove_one(struct pci_dev *pdev)
3412 {
3413         struct adapter *adapter = pci_get_drvdata(pdev);
3414
3415         if (adapter) {
3416                 int i;
3417
3418                 t3_sge_stop(adapter);
3419                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3420                                    &cxgb3_attr_group);
3421
3422                 if (is_offload(adapter)) {
3423                         cxgb3_adapter_unofld(adapter);
3424                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3425                                      &adapter->open_device_map))
3426                                 offload_close(&adapter->tdev);
3427                 }
3428
3429                 for_each_port(adapter, i)
3430                     if (test_bit(i, &adapter->registered_device_map))
3431                         unregister_netdev(adapter->port[i]);
3432
3433                 t3_stop_sge_timers(adapter);
3434                 t3_free_sge_resources(adapter);
3435                 cxgb_disable_msi(adapter);
3436
3437                 for_each_port(adapter, i)
3438                         if (adapter->port[i])
3439                                 free_netdev(adapter->port[i]);
3440
3441                 iounmap(adapter->regs);
3442                 kfree_skb(adapter->nofail_skb); /* kfree_skb() is NULL-safe */
3444                 kfree(adapter);
3445                 pci_release_regions(pdev);
3446                 pci_disable_device(pdev);
3447         }
3448 }
3449
3450 static struct pci_driver driver = {
3451         .name = DRV_NAME,
3452         .id_table = cxgb3_pci_tbl,
3453         .probe = init_one,
3454         .remove = remove_one,
3455         .err_handler = &t3_err_handler,
3456 };
3457
3458 static int __init cxgb3_init_module(void)
3459 {
3460         int ret;
3461
3462         cxgb3_offload_init();
3463
3464         ret = pci_register_driver(&driver);
3465         return ret;
3466 }
3467
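/*
 * cxgb3_wq is created lazily on first probe, so it may still be NULL here
 * if no adapter was ever probed.
 */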
3468 static void __exit cxgb3_cleanup_module(void)
3469 {
3470         pci_unregister_driver(&driver);
3471         if (cxgb3_wq)
3472                 destroy_workqueue(cxgb3_wq);
3473 }
3474
3475 module_init(cxgb3_init_module);
3476 module_exit(cxgb3_cleanup_module);