/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
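/*
 * Illustrative usage (not from this file): to keep the driver off MSI-X on
 * a platform with broken MSI-X support, one would load the module with
 *
 *      modprobe cxgb3 msi=1
 *
 * The 0644 permissions above also expose the knob at
 * /sys/module/cxgb3/parameters/msi, though a changed value only matters
 * the next time an adapter is brought up.
 */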

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
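/*
 * Sketch of how this is wired up elsewhere (the call sites are outside this
 * excerpt): upstream cxgb3 creates the queue once at module init, roughly
 *
 *      cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *
 * and destroys it again in the module exit path.
 */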

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter whose PHY reported the module change
 *      @port_id: the port index of the PHY reporting the module change
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = __skb_put_zero(skb, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.  (A worked example of the resulting
 *      mapping follows the function body.)
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
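/*
 * Worked example of the rspq_map layout built above, with hypothetical
 * queue counts nq0 = 2 and nq1 = 2:
 *
 *      rspq_map[0 .. RSS_TABLE_SIZE/2 - 1]            = 0, 1, 0, 1, ...
 *      rspq_map[RSS_TABLE_SIZE/2 .. RSS_TABLE_SIZE-1] = 2, 3, 2, 3, ...
 *
 * i.e. the lower half of the table round-robins over port 0's qsets and
 * the upper half over port 1's qsets (offset by nq0), which is what
 * narrows the RSS hash down to the response queues each port actually
 * owns.
 */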

static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}
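/*
 * The note above is what the NAPI_INIT flag implements; cxgb_up() later in
 * this file uses it as the guard:
 *
 *      if (!(adap->flags & NAPI_INIT))
 *              init_napi(adap);
 *
 * so a second cxgb_up(), e.g. after EEH recovery, skips re-adding the NAPI
 * instances.
 */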

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static const struct attribute_group cxgb3_attr_group = {
        .attrs = cxgb3_attrs,
};

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
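/*
 * Worked example of the rate computation above, with made-up register
 * values and assuming vpd.cclk is in kHz: for a 125 MHz core clock
 * (cclk = 125000), cpt = 250 and bpt = 64,
 *
 *      v    = (125000 * 1000) / 250 = 500000  ticks/sec
 *      rate = (500000 * 64) / 125   = 256000  Kbps (256 Mbps)
 *
 * the division by 125 converting bytes/sec to Kbits/sec (* 8 / 1000).
 */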

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static const struct attribute_group offload_attr_group = {
        .attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

/*(DEBLOBBED)*/
#define FW_FNAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
#define TPSRAM_NAME "/*(DEBLOBBED)*/"
#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, taking the checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4 ; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}
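/*
 * A minimal sketch of the checksum convention verified above (stating only
 * what the loop checks): the image is a sequence of big-endian 32-bit words
 * whose sum modulo 2^32, including the trailing checksum word, must equal
 * 0xffffffff.  E.g. a hypothetical two-word payload 0x00000001 0x00000002
 * would carry the checksum word 0xfffffffc, since
 * 1 + 2 + 0xfffffffc == 0xffffffff.
 */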

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = reject_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = reject_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
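/*
 * Timing sketch (units inferred from the expression above): a non-zero
 * linkpoll_period is in tenths of a second, so e.g. linkpoll_period = 5
 * requeues the check task every HZ / 2 jiffies (0.5 s); with link polling
 * disabled the task instead runs every stats_update_period seconds.
 */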

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &dev->stats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}
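/*
 * These two hooks back the ethtool message-level interface; e.g.
 * (illustrative command, any interface name)
 *
 *      ethtool -s eth0 msglvl 0x3fff
 *
 * overrides the DFLT_MSG_ENABLE bitmap the driver starts with.
 */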
1536
1537 static const char stats_strings[][ETH_GSTRING_LEN] = {
1538         "TxOctetsOK         ",
1539         "TxFramesOK         ",
1540         "TxMulticastFramesOK",
1541         "TxBroadcastFramesOK",
1542         "TxPauseFrames      ",
1543         "TxUnderrun         ",
1544         "TxExtUnderrun      ",
1545
1546         "TxFrames64         ",
1547         "TxFrames65To127    ",
1548         "TxFrames128To255   ",
1549         "TxFrames256To511   ",
1550         "TxFrames512To1023  ",
1551         "TxFrames1024To1518 ",
1552         "TxFrames1519ToMax  ",
1553
1554         "RxOctetsOK         ",
1555         "RxFramesOK         ",
1556         "RxMulticastFramesOK",
1557         "RxBroadcastFramesOK",
1558         "RxPauseFrames      ",
1559         "RxFCSErrors        ",
1560         "RxSymbolErrors     ",
1561         "RxShortErrors      ",
1562         "RxJabberErrors     ",
1563         "RxLengthErrors     ",
1564         "RxFIFOoverflow     ",
1565
1566         "RxFrames64         ",
1567         "RxFrames65To127    ",
1568         "RxFrames128To255   ",
1569         "RxFrames256To511   ",
1570         "RxFrames512To1023  ",
1571         "RxFrames1024To1518 ",
1572         "RxFrames1519ToMax  ",
1573
1574         "PhyFIFOErrors      ",
1575         "TSO                ",
1576         "VLANextractions    ",
1577         "VLANinsertions     ",
1578         "TxCsumOffload      ",
1579         "RxCsumGood         ",
1580         "LroAggregated      ",
1581         "LroFlushed         ",
1582         "LroNoDesc          ",
1583         "RxDrops            ",
1584
1585         "CheckTXEnToggled   ",
1586         "CheckResets        ",
1587
1588         "LinkFaults         ",
1589 };
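/*
 * get_strings() copies this table verbatim, and get_stats() must write one
 * u64 per entry in exactly this order; the trailing spaces pad each name to
 * a fixed display width.
 */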
1590
1591 static int get_sset_count(struct net_device *dev, int sset)
1592 {
1593         switch (sset) {
1594         case ETH_SS_STATS:
1595                 return ARRAY_SIZE(stats_strings);
1596         default:
1597                 return -EOPNOTSUPP;
1598         }
1599 }
1600
1601 #define T3_REGMAP_SIZE (3 * 1024)
1602
1603 static int get_regs_len(struct net_device *dev)
1604 {
1605         return T3_REGMAP_SIZE;
1606 }
1607
1608 static int get_eeprom_len(struct net_device *dev)
1609 {
1610         return EEPROMSIZE;
1611 }
1612
1613 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1614 {
1615         struct port_info *pi = netdev_priv(dev);
1616         struct adapter *adapter = pi->adapter;
1617         u32 fw_vers = 0;
1618         u32 tp_vers = 0;
1619
1620         spin_lock(&adapter->stats_lock);
1621         t3_get_fw_version(adapter, &fw_vers);
1622         t3_get_tp_version(adapter, &tp_vers);
1623         spin_unlock(&adapter->stats_lock);
1624
1625         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1626         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1627         strlcpy(info->bus_info, pci_name(adapter->pdev),
1628                 sizeof(info->bus_info));
1629         if (fw_vers)
1630                 snprintf(info->fw_version, sizeof(info->fw_version),
1631                          "%s %u.%u.%u TP %u.%u.%u",
1632                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1633                          G_FW_VERSION_MAJOR(fw_vers),
1634                          G_FW_VERSION_MINOR(fw_vers),
1635                          G_FW_VERSION_MICRO(fw_vers),
1636                          G_TP_VERSION_MAJOR(tp_vers),
1637                          G_TP_VERSION_MINOR(tp_vers),
1638                          G_TP_VERSION_MICRO(tp_vers));
1639 }
1640
1641 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1642 {
1643         if (stringset == ETH_SS_STATS)
1644                 memcpy(data, stats_strings, sizeof(stats_strings));
1645 }
1646
1647 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1648                                             struct port_info *p, int idx)
1649 {
1650         int i;
1651         unsigned long tot = 0;
1652
1653         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1654                 tot += adapter->sge.qs[i].port_stats[idx];
1655         return tot;
1656 }
1657
1658 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1659                       u64 *data)
1660 {
1661         struct port_info *pi = netdev_priv(dev);
1662         struct adapter *adapter = pi->adapter;
1663         const struct mac_stats *s;
1664
1665         spin_lock(&adapter->stats_lock);
1666         s = t3_mac_update_stats(&pi->mac);
1667         spin_unlock(&adapter->stats_lock);
1668
1669         *data++ = s->tx_octets;
1670         *data++ = s->tx_frames;
1671         *data++ = s->tx_mcast_frames;
1672         *data++ = s->tx_bcast_frames;
1673         *data++ = s->tx_pause;
1674         *data++ = s->tx_underrun;
1675         *data++ = s->tx_fifo_urun;
1676
1677         *data++ = s->tx_frames_64;
1678         *data++ = s->tx_frames_65_127;
1679         *data++ = s->tx_frames_128_255;
1680         *data++ = s->tx_frames_256_511;
1681         *data++ = s->tx_frames_512_1023;
1682         *data++ = s->tx_frames_1024_1518;
1683         *data++ = s->tx_frames_1519_max;
1684
1685         *data++ = s->rx_octets;
1686         *data++ = s->rx_frames;
1687         *data++ = s->rx_mcast_frames;
1688         *data++ = s->rx_bcast_frames;
1689         *data++ = s->rx_pause;
1690         *data++ = s->rx_fcs_errs;
1691         *data++ = s->rx_symbol_errs;
1692         *data++ = s->rx_short;
1693         *data++ = s->rx_jabber;
1694         *data++ = s->rx_too_long;
1695         *data++ = s->rx_fifo_ovfl;
1696
1697         *data++ = s->rx_frames_64;
1698         *data++ = s->rx_frames_65_127;
1699         *data++ = s->rx_frames_128_255;
1700         *data++ = s->rx_frames_256_511;
1701         *data++ = s->rx_frames_512_1023;
1702         *data++ = s->rx_frames_1024_1518;
1703         *data++ = s->rx_frames_1519_max;
1704
1705         *data++ = pi->phy.fifo_errors;
1706
1707         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1708         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1709         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1710         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1711         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
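        /* The next three slots are LroAggregated/LroFlushed/LroNoDesc;
         * they always read 0, the driver's LRO path having been superseded
         * by GRO while the ethtool layout was kept.
         */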
1712         *data++ = 0;
1713         *data++ = 0;
1714         *data++ = 0;
1715         *data++ = s->rx_cong_drops;
1716
1717         *data++ = s->num_toggled;
1718         *data++ = s->num_resets;
1719
1720         *data++ = s->link_faults;
1721 }
1722
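/*
 * reg_block_dump() copies registers [start, end] into buf at byte offset
 * start, one 32-bit word at a time, so each register lands at its own
 * address within the T3_REGMAP_SIZE dump below.
 */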
1723 static inline void reg_block_dump(struct adapter *ap, void *buf,
1724                                   unsigned int start, unsigned int end)
1725 {
1726         u32 *p = buf + start;
1727
1728         for (; start <= end; start += sizeof(u32))
1729                 *p++ = t3_read_reg(ap, start);
1730 }
1731
1732 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1733                      void *buf)
1734 {
1735         struct port_info *pi = netdev_priv(dev);
1736         struct adapter *ap = pi->adapter;
1737
1738         /*
1739          * Version scheme:
1740          * bits 0..9: chip version
1741          * bits 10..15: chip revision
1742          * bit 31: set for PCIe cards
1743          */
1744         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
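        /* e.g. with illustrative values, a rev-2 PCIe adapter reports
         * 3 | (2 << 10) | (1u << 31) == 0x80000803 here.
         */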
1745
1746         /*
1747          * We skip the MAC statistics registers because they are clear-on-read.
1748          * Also reading multi-register stats would need to synchronize with the
1749          * periodic mac stats accumulation.  Hard to justify the complexity.
1750          */
1751         memset(buf, 0, T3_REGMAP_SIZE);
1752         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1753         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1754         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1755         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1756         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1757         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1758                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1759         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1760                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1761 }
1762
1763 static int restart_autoneg(struct net_device *dev)
1764 {
1765         struct port_info *p = netdev_priv(dev);
1766
1767         if (!netif_running(dev))
1768                 return -EAGAIN;
1769         if (p->link_config.autoneg != AUTONEG_ENABLE)
1770                 return -EINVAL;
1771         p->phy.ops->autoneg_restart(&p->phy);
1772         return 0;
1773 }
1774
1775 static int set_phys_id(struct net_device *dev,
1776                        enum ethtool_phys_id_state state)
1777 {
1778         struct port_info *pi = netdev_priv(dev);
1779         struct adapter *adapter = pi->adapter;
1780
1781         switch (state) {
1782         case ETHTOOL_ID_ACTIVE:
1783                 return 1;       /* cycle on/off once per second */
1784
1785         case ETHTOOL_ID_OFF:
1786                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1787                 break;
1788
1789         case ETHTOOL_ID_ON:
1790         case ETHTOOL_ID_INACTIVE:
1791                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1792                          F_GPIO0_OUT_VAL);
1793         }
1794
1795         return 0;
1796 }
1797
1798 static int get_link_ksettings(struct net_device *dev,
1799                               struct ethtool_link_ksettings *cmd)
1800 {
1801         struct port_info *p = netdev_priv(dev);
1802         u32 supported;
1803
1804         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1805                                                 p->link_config.supported);
1806         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1807                                                 p->link_config.advertising);
1808
1809         if (netif_carrier_ok(dev)) {
1810                 cmd->base.speed = p->link_config.speed;
1811                 cmd->base.duplex = p->link_config.duplex;
1812         } else {
1813                 cmd->base.speed = SPEED_UNKNOWN;
1814                 cmd->base.duplex = DUPLEX_UNKNOWN;
1815         }
1816
1817         ethtool_convert_link_mode_to_legacy_u32(&supported,
1818                                                 cmd->link_modes.supported);
1819
1820         cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1821         cmd->base.phy_address = p->phy.mdio.prtad;
1822         cmd->base.autoneg = p->link_config.autoneg;
1823         return 0;
1824 }
1825
1826 static int speed_duplex_to_caps(int speed, int duplex)
1827 {
1828         int cap = 0;
1829
1830         switch (speed) {
1831         case SPEED_10:
1832                 if (duplex == DUPLEX_FULL)
1833                         cap = SUPPORTED_10baseT_Full;
1834                 else
1835                         cap = SUPPORTED_10baseT_Half;
1836                 break;
1837         case SPEED_100:
1838                 if (duplex == DUPLEX_FULL)
1839                         cap = SUPPORTED_100baseT_Full;
1840                 else
1841                         cap = SUPPORTED_100baseT_Half;
1842                 break;
1843         case SPEED_1000:
1844                 if (duplex == DUPLEX_FULL)
1845                         cap = SUPPORTED_1000baseT_Full;
1846                 else
1847                         cap = SUPPORTED_1000baseT_Half;
1848                 break;
1849         case SPEED_10000:
1850                 if (duplex == DUPLEX_FULL)
1851                         cap = SUPPORTED_10000baseT_Full;
1852         }
1853         return cap;
1854 }
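/*
 * For example, speed_duplex_to_caps(SPEED_100, DUPLEX_FULL) yields
 * SUPPORTED_100baseT_Full, while unsupported combinations such as half
 * duplex at SPEED_10000 fall through and yield 0.
 */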
1855
1856 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1857                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1858                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1859                       ADVERTISED_10000baseT_Full)
1860
1861 static int set_link_ksettings(struct net_device *dev,
1862                               const struct ethtool_link_ksettings *cmd)
1863 {
1864         struct port_info *p = netdev_priv(dev);
1865         struct link_config *lc = &p->link_config;
1866         u32 advertising;
1867
1868         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1869                                                 cmd->link_modes.advertising);
1870
1871         if (!(lc->supported & SUPPORTED_Autoneg)) {
1872                 /*
1873                  * PHY offers a single speed/duplex.  See if that's what's
1874                  * being requested.
1875                  */
1876                 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1877                         u32 speed = cmd->base.speed;
1878                         int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1879                         if (lc->supported & cap)
1880                                 return 0;
1881                 }
1882                 return -EINVAL;
1883         }
1884
1885         if (cmd->base.autoneg == AUTONEG_DISABLE) {
1886                 u32 speed = cmd->base.speed;
1887                 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1888
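                /* Forcing 1Gb/s is rejected below: 1000BASE-T mandates
                 * autonegotiation for master/slave resolution.
                 */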
1889                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1890                         return -EINVAL;
1891                 lc->requested_speed = speed;
1892                 lc->requested_duplex = cmd->base.duplex;
1893                 lc->advertising = 0;
1894         } else {
1895                 advertising &= ADVERTISED_MASK;
1896                 advertising &= lc->supported;
1897                 if (!advertising)
1898                         return -EINVAL;
1899                 lc->requested_speed = SPEED_INVALID;
1900                 lc->requested_duplex = DUPLEX_INVALID;
1901                 lc->advertising = advertising | ADVERTISED_Autoneg;
1902         }
1903         lc->autoneg = cmd->base.autoneg;
1904         if (netif_running(dev))
1905                 t3_link_start(&p->phy, &p->mac, lc);
1906         return 0;
1907 }
1908
1909 static void get_pauseparam(struct net_device *dev,
1910                            struct ethtool_pauseparam *epause)
1911 {
1912         struct port_info *p = netdev_priv(dev);
1913
1914         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1915         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1916         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1917 }
1918
1919 static int set_pauseparam(struct net_device *dev,
1920                           struct ethtool_pauseparam *epause)
1921 {
1922         struct port_info *p = netdev_priv(dev);
1923         struct link_config *lc = &p->link_config;
1924
1925         if (epause->autoneg == AUTONEG_DISABLE)
1926                 lc->requested_fc = 0;
1927         else if (lc->supported & SUPPORTED_Autoneg)
1928                 lc->requested_fc = PAUSE_AUTONEG;
1929         else
1930                 return -EINVAL;
1931
1932         if (epause->rx_pause)
1933                 lc->requested_fc |= PAUSE_RX;
1934         if (epause->tx_pause)
1935                 lc->requested_fc |= PAUSE_TX;
1936         if (lc->autoneg == AUTONEG_ENABLE) {
1937                 if (netif_running(dev))
1938                         t3_link_start(&p->phy, &p->mac, lc);
1939         } else {
1940                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1941                 if (netif_running(dev))
1942                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1943         }
1944         return 0;
1945 }
1946
1947 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1948 {
1949         struct port_info *pi = netdev_priv(dev);
1950         struct adapter *adapter = pi->adapter;
1951         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1952
1953         e->rx_max_pending = MAX_RX_BUFFERS;
1954         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1955         e->tx_max_pending = MAX_TXQ_ENTRIES;
1956
1957         e->rx_pending = q->fl_size;
1958         e->rx_mini_pending = q->rspq_size;
1959         e->rx_jumbo_pending = q->jumbo_size;
1960         e->tx_pending = q->txq_size[0];
1961 }
1962
1963 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1964 {
1965         struct port_info *pi = netdev_priv(dev);
1966         struct adapter *adapter = pi->adapter;
1967         struct qset_params *q;
1968         int i;
1969
1970         if (e->rx_pending > MAX_RX_BUFFERS ||
1971             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1972             e->tx_pending > MAX_TXQ_ENTRIES ||
1973             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1974             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1975             e->rx_pending < MIN_FL_ENTRIES ||
1976             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1977             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1978                 return -EINVAL;
1979
1980         if (adapter->flags & FULL_INIT_DONE)
1981                 return -EBUSY;
1982
1983         q = &adapter->params.sge.qset[pi->first_qset];
1984         for (i = 0; i < pi->nqsets; ++i, ++q) {
1985                 q->rspq_size = e->rx_mini_pending;
1986                 q->fl_size = e->rx_pending;
1987                 q->jumbo_size = e->rx_jumbo_pending;
1988                 q->txq_size[0] = e->tx_pending;
1989                 q->txq_size[1] = e->tx_pending;
1990                 q->txq_size[2] = e->tx_pending;
1991         }
1992         return 0;
1993 }
1994
1995 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1996 {
1997         struct port_info *pi = netdev_priv(dev);
1998         struct adapter *adapter = pi->adapter;
1999         struct qset_params *qsp;
2000         struct sge_qset *qs;
2001         int i;
2002
2003         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2004                 return -EINVAL;
2005
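        /* Note: this applies the value to qsets 0..nqsets-1, not to the
         * range starting at pi->first_qset.
         */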
2006         for (i = 0; i < pi->nqsets; i++) {
2007                 qsp = &adapter->params.sge.qset[i];
2008                 qs = &adapter->sge.qs[i];
2009                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2010                 t3_update_qset_coalesce(qs, qsp);
2011         }
2012
2013         return 0;
2014 }
2015
2016 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2017 {
2018         struct port_info *pi = netdev_priv(dev);
2019         struct adapter *adapter = pi->adapter;
2020         struct qset_params *q = adapter->params.sge.qset;
2021
2022         c->rx_coalesce_usecs = q->coalesce_usecs;
2023         return 0;
2024 }
2025
2026 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2027                       u8 *data)
2028 {
2029         struct port_info *pi = netdev_priv(dev);
2030         struct adapter *adapter = pi->adapter;
2031         int i, err = 0;
2032
2033         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2034         if (!buf)
2035                 return -ENOMEM;
2036
2037         e->magic = EEPROM_MAGIC;
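        /* Read whole 32-bit words covering [offset, offset + len), then
         * copy out just the bytes the caller asked for.
         */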
2038         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2039                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2040
2041         if (!err)
2042                 memcpy(data, buf + e->offset, e->len);
2043         kfree(buf);
2044         return err;
2045 }
2046
2047 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2048                       u8 *data)
2049 {
2050         struct port_info *pi = netdev_priv(dev);
2051         struct adapter *adapter = pi->adapter;
2052         u32 aligned_offset, aligned_len;
2053         __le32 *p;
2054         u8 *buf;
2055         int err;
2056
2057         if (eeprom->magic != EEPROM_MAGIC)
2058                 return -EINVAL;
2059
2060         aligned_offset = eeprom->offset & ~3;
2061         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2062
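        /*
         * Unaligned writes are done read-modify-write: fetch the first and
         * last words of the aligned span, splice in the caller's bytes, and
         * write back whole 32-bit words.
         */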
2063         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2064                 buf = kmalloc(aligned_len, GFP_KERNEL);
2065                 if (!buf)
2066                         return -ENOMEM;
2067                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2068                 if (!err && aligned_len > 4)
2069                         err = t3_seeprom_read(adapter,
2070                                               aligned_offset + aligned_len - 4,
2071                                               (__le32 *)&buf[aligned_len - 4]);
2072                 if (err)
2073                         goto out;
2074                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2075         } else
2076                 buf = data;
2077
2078         err = t3_seeprom_wp(adapter, 0);
2079         if (err)
2080                 goto out;
2081
2082         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2083                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2084                 aligned_offset += 4;
2085         }
2086
2087         if (!err)
2088                 err = t3_seeprom_wp(adapter, 1);
2089 out:
2090         if (buf != data)
2091                 kfree(buf);
2092         return err;
2093 }
2094
2095 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2096 {
2097         wol->supported = 0;
2098         wol->wolopts = 0;
2099         memset(&wol->sopass, 0, sizeof(wol->sopass));
2100 }
2101
2102 static const struct ethtool_ops cxgb_ethtool_ops = {
2103         .get_drvinfo = get_drvinfo,
2104         .get_msglevel = get_msglevel,
2105         .set_msglevel = set_msglevel,
2106         .get_ringparam = get_sge_param,
2107         .set_ringparam = set_sge_param,
2108         .get_coalesce = get_coalesce,
2109         .set_coalesce = set_coalesce,
2110         .get_eeprom_len = get_eeprom_len,
2111         .get_eeprom = get_eeprom,
2112         .set_eeprom = set_eeprom,
2113         .get_pauseparam = get_pauseparam,
2114         .set_pauseparam = set_pauseparam,
2115         .get_link = ethtool_op_get_link,
2116         .get_strings = get_strings,
2117         .set_phys_id = set_phys_id,
2118         .nway_reset = restart_autoneg,
2119         .get_sset_count = get_sset_count,
2120         .get_ethtool_stats = get_stats,
2121         .get_regs_len = get_regs_len,
2122         .get_regs = get_regs,
2123         .get_wol = get_wol,
2124         .get_link_ksettings = get_link_ksettings,
2125         .set_link_ksettings = set_link_ksettings,
2126 };
2127
2128 static int in_range(int val, int lo, int hi)
2129 {
2130         return val < 0 || (val <= hi && val >= lo);
2131 }
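/*
 * Negative values serve as "leave unchanged" sentinels in the ioctl
 * handlers below, so e.g. in_range(-1, 32, 16384) deliberately returns 1.
 */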
2132
2133 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2134 {
2135         struct port_info *pi = netdev_priv(dev);
2136         struct adapter *adapter = pi->adapter;
2137         u32 cmd;
2138         int ret;
2139
2140         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2141                 return -EFAULT;
2142
2143         switch (cmd) {
2144         case CHELSIO_SET_QSET_PARAMS:{
2145                 int i;
2146                 struct qset_params *q;
2147                 struct ch_qset_params t;
2148                 int q1 = pi->first_qset;
2149                 int nqsets = pi->nqsets;
2150
2151                 if (!capable(CAP_NET_ADMIN))
2152                         return -EPERM;
2153                 if (copy_from_user(&t, useraddr, sizeof(t)))
2154                         return -EFAULT;
2155                 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2156                         return -EINVAL;
2157                 if (t.qset_idx >= SGE_QSETS)
2158                         return -EINVAL;
2159                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2160                     !in_range(t.cong_thres, 0, 255) ||
2161                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2162                               MAX_TXQ_ENTRIES) ||
2163                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2164                               MAX_TXQ_ENTRIES) ||
2165                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2166                               MAX_CTRL_TXQ_ENTRIES) ||
2167                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2168                               MAX_RX_BUFFERS) ||
2169                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2170                               MAX_RX_JUMBO_BUFFERS) ||
2171                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2172                               MAX_RSPQ_ENTRIES))
2173                         return -EINVAL;
2174
2175                 if ((adapter->flags & FULL_INIT_DONE) &&
2176                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2177                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2178                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2179                         t.polling >= 0 || t.cong_thres >= 0))
2180                         return -EBUSY;
2181
2182                 /* Allow setting of any available qset when offload enabled */
2183                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2184                         q1 = 0;
2185                         for_each_port(adapter, i) {
2186                                 pi = adap2pinfo(adapter, i);
2187                                 nqsets += pi->first_qset + pi->nqsets;
2188                         }
2189                 }
2190
2191                 if (t.qset_idx < q1)
2192                         return -EINVAL;
2193                 if (t.qset_idx > q1 + nqsets - 1)
2194                         return -EINVAL;
2195
2196                 q = &adapter->params.sge.qset[t.qset_idx];
2197
2198                 if (t.rspq_size >= 0)
2199                         q->rspq_size = t.rspq_size;
2200                 if (t.fl_size[0] >= 0)
2201                         q->fl_size = t.fl_size[0];
2202                 if (t.fl_size[1] >= 0)
2203                         q->jumbo_size = t.fl_size[1];
2204                 if (t.txq_size[0] >= 0)
2205                         q->txq_size[0] = t.txq_size[0];
2206                 if (t.txq_size[1] >= 0)
2207                         q->txq_size[1] = t.txq_size[1];
2208                 if (t.txq_size[2] >= 0)
2209                         q->txq_size[2] = t.txq_size[2];
2210                 if (t.cong_thres >= 0)
2211                         q->cong_thres = t.cong_thres;
2212                 if (t.intr_lat >= 0) {
2213                         struct sge_qset *qs =
2214                                 &adapter->sge.qs[t.qset_idx];
2215
2216                         q->coalesce_usecs = t.intr_lat;
2217                         t3_update_qset_coalesce(qs, q);
2218                 }
2219                 if (t.polling >= 0) {
2220                         if (adapter->flags & USING_MSIX)
2221                                 q->polling = t.polling;
2222                         else {
2223                                 /* No polling with INTx for T3A */
2224                                 if (adapter->params.rev == 0 &&
2225                                         !(adapter->flags & USING_MSI))
2226                                         t.polling = 0;
2227
2228                                 for (i = 0; i < SGE_QSETS; i++) {
2229                                         q = &adapter->params.sge.
2230                                                 qset[i];
2231                                         q->polling = t.polling;
2232                                 }
2233                         }
2234                 }
2235
2236                 if (t.lro >= 0) {
2237                         if (t.lro)
2238                                 dev->wanted_features |= NETIF_F_GRO;
2239                         else
2240                                 dev->wanted_features &= ~NETIF_F_GRO;
2241                         netdev_update_features(dev);
2242                 }
2243
2244                 break;
2245         }
2246         case CHELSIO_GET_QSET_PARAMS:{
2247                 struct qset_params *q;
2248                 struct ch_qset_params t;
2249                 int q1 = pi->first_qset;
2250                 int nqsets = pi->nqsets;
2251                 int i;
2252
2253                 if (copy_from_user(&t, useraddr, sizeof(t)))
2254                         return -EFAULT;
2255
2256                 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2257                         return -EINVAL;
2258
2259                 /* Display qsets for all ports when offload enabled */
2260                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2261                         q1 = 0;
2262                         for_each_port(adapter, i) {
2263                                 pi = adap2pinfo(adapter, i);
2264                                 nqsets = pi->first_qset + pi->nqsets;
2265                         }
2266                 }
2267
2268                 if (t.qset_idx >= nqsets)
2269                         return -EINVAL;
2270                 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2271
2272                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2273                 t.rspq_size = q->rspq_size;
2274                 t.txq_size[0] = q->txq_size[0];
2275                 t.txq_size[1] = q->txq_size[1];
2276                 t.txq_size[2] = q->txq_size[2];
2277                 t.fl_size[0] = q->fl_size;
2278                 t.fl_size[1] = q->jumbo_size;
2279                 t.polling = q->polling;
2280                 t.lro = !!(dev->features & NETIF_F_GRO);
2281                 t.intr_lat = q->coalesce_usecs;
2282                 t.cong_thres = q->cong_thres;
2283                 t.qnum = q1;
2284
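                /* MSI-X entry 0 carries the adapter's slow-path interrupt;
                 * per-qset vectors start at index 1, hence the "+ 1".
                 */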
2285                 if (adapter->flags & USING_MSIX)
2286                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2287                 else
2288                         t.vector = adapter->pdev->irq;
2289
2290                 if (copy_to_user(useraddr, &t, sizeof(t)))
2291                         return -EFAULT;
2292                 break;
2293         }
2294         case CHELSIO_SET_QSET_NUM:{
2295                 struct ch_reg edata;
2296                 unsigned int i, first_qset = 0, other_qsets = 0;
2297
2298                 if (!capable(CAP_NET_ADMIN))
2299                         return -EPERM;
2300                 if (adapter->flags & FULL_INIT_DONE)
2301                         return -EBUSY;
2302                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2303                         return -EFAULT;
2304                 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2305                         return -EINVAL;
2306                 if (edata.val < 1 ||
2307                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2308                         return -EINVAL;
2309
2310                 for_each_port(adapter, i)
2311                         if (adapter->port[i] && adapter->port[i] != dev)
2312                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2313
2314                 if (edata.val + other_qsets > SGE_QSETS)
2315                         return -EINVAL;
2316
2317                 pi->nqsets = edata.val;
2318
2319                 for_each_port(adapter, i)
2320                         if (adapter->port[i]) {
2321                                 pi = adap2pinfo(adapter, i);
2322                                 pi->first_qset = first_qset;
2323                                 first_qset += pi->nqsets;
2324                         }
2325                 break;
2326         }
2327         case CHELSIO_GET_QSET_NUM:{
2328                 struct ch_reg edata;
2329
2330                 memset(&edata, 0, sizeof(struct ch_reg));
2331
2332                 edata.cmd = CHELSIO_GET_QSET_NUM;
2333                 edata.val = pi->nqsets;
2334                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2335                         return -EFAULT;
2336                 break;
2337         }
2338         case CHELSIO_LOAD_FW:{
2339                 u8 *fw_data;
2340                 struct ch_mem_range t;
2341
2342                 if (!capable(CAP_SYS_RAWIO))
2343                         return -EPERM;
2344                 if (copy_from_user(&t, useraddr, sizeof(t)))
2345                         return -EFAULT;
2346                 if (t.cmd != CHELSIO_LOAD_FW)
2347                         return -EINVAL;
2348                 /* Check t.len sanity? */
2349                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2350                 if (IS_ERR(fw_data))
2351                         return PTR_ERR(fw_data);
2352
2353                 ret = t3_load_fw(adapter, fw_data, t.len);
2354                 kfree(fw_data);
2355                 if (ret)
2356                         return ret;
2357                 break;
2358         }
2359         case CHELSIO_SETMTUTAB:{
2360                 struct ch_mtus m;
2361                 int i;
2362
2363                 if (!is_offload(adapter))
2364                         return -EOPNOTSUPP;
2365                 if (!capable(CAP_NET_ADMIN))
2366                         return -EPERM;
2367                 if (offload_running(adapter))
2368                         return -EBUSY;
2369                 if (copy_from_user(&m, useraddr, sizeof(m)))
2370                         return -EFAULT;
2371                 if (m.cmd != CHELSIO_SETMTUTAB)
2372                         return -EINVAL;
2373                 if (m.nmtus != NMTUS)
2374                         return -EINVAL;
2375                 if (m.mtus[0] < 81)     /* accommodate SACK */
2376                         return -EINVAL;
2377
2378                 /* MTUs must be in ascending order */
2379                 for (i = 1; i < NMTUS; ++i)
2380                         if (m.mtus[i] < m.mtus[i - 1])
2381                                 return -EINVAL;
2382
2383                 memcpy(adapter->params.mtus, m.mtus,
2384                         sizeof(adapter->params.mtus));
2385                 break;
2386         }
2387         case CHELSIO_GET_PM:{
2388                 struct tp_params *p = &adapter->params.tp;
2389                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2390
2391                 if (!is_offload(adapter))
2392                         return -EOPNOTSUPP;
2393                 m.tx_pg_sz = p->tx_pg_size;
2394                 m.tx_num_pg = p->tx_num_pgs;
2395                 m.rx_pg_sz = p->rx_pg_size;
2396                 m.rx_num_pg = p->rx_num_pgs;
2397                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2398                 if (copy_to_user(useraddr, &m, sizeof(m)))
2399                         return -EFAULT;
2400                 break;
2401         }
2402         case CHELSIO_SET_PM:{
2403                 struct ch_pm m;
2404                 struct tp_params *p = &adapter->params.tp;
2405
2406                 if (!is_offload(adapter))
2407                         return -EOPNOTSUPP;
2408                 if (!capable(CAP_NET_ADMIN))
2409                         return -EPERM;
2410                 if (adapter->flags & FULL_INIT_DONE)
2411                         return -EBUSY;
2412                 if (copy_from_user(&m, useraddr, sizeof(m)))
2413                         return -EFAULT;
2414                 if (m.cmd != CHELSIO_SET_PM)
2415                         return -EINVAL;
2416                 if (!is_power_of_2(m.rx_pg_sz) ||
2417                         !is_power_of_2(m.tx_pg_sz))
2418                         return -EINVAL; /* not power of 2 */
2419                 if (!(m.rx_pg_sz & 0x14000))
2420                         return -EINVAL; /* not 16KB or 64KB */
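                /* 0x1554000 accepts tx page sizes of 16KB..16MB in
                 * power-of-4 steps (bits 14, 16, 18, 20, 22, 24).
                 */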
2421                 if (!(m.tx_pg_sz & 0x1554000))
2422                         return -EINVAL;
2423                 if (m.tx_num_pg == -1)
2424                         m.tx_num_pg = p->tx_num_pgs;
2425                 if (m.rx_num_pg == -1)
2426                         m.rx_num_pg = p->rx_num_pgs;
2427                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2428                         return -EINVAL;
2429                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2430                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2431                         return -EINVAL;
2432                 p->rx_pg_size = m.rx_pg_sz;
2433                 p->tx_pg_size = m.tx_pg_sz;
2434                 p->rx_num_pgs = m.rx_num_pg;
2435                 p->tx_num_pgs = m.tx_num_pg;
2436                 break;
2437         }
2438         case CHELSIO_GET_MEM:{
2439                 struct ch_mem_range t;
2440                 struct mc7 *mem;
2441                 u64 buf[32];
2442
2443                 if (!is_offload(adapter))
2444                         return -EOPNOTSUPP;
2445                 if (!capable(CAP_NET_ADMIN))
2446                         return -EPERM;
2447                 if (!(adapter->flags & FULL_INIT_DONE))
2448                         return -EIO;    /* need the memory controllers */
2449                 if (copy_from_user(&t, useraddr, sizeof(t)))
2450                         return -EFAULT;
2451                 if (t.cmd != CHELSIO_GET_MEM)
2452                         return -EINVAL;
2453                 if ((t.addr & 7) || (t.len & 7))
2454                         return -EINVAL;
2455                 if (t.mem_id == MEM_CM)
2456                         mem = &adapter->cm;
2457                 else if (t.mem_id == MEM_PMRX)
2458                         mem = &adapter->pmrx;
2459                 else if (t.mem_id == MEM_PMTX)
2460                         mem = &adapter->pmtx;
2461                 else
2462                         return -EINVAL;
2463
2464                 /*
2465                  * Version scheme:
2466                  * bits 0..9: chip version
2467                  * bits 10..15: chip revision
2468                  */
2469                 t.version = 3 | (adapter->params.rev << 10);
2470                 if (copy_to_user(useraddr, &t, sizeof(t)))
2471                         return -EFAULT;
2472
2473                 /*
2474                  * Read 256 bytes at a time as len can be large and we don't
2475                  * want to use huge intermediate buffers.
2476                  */
2477                 useraddr += sizeof(t);  /* advance to start of buffer */
2478                 while (t.len) {
2479                         unsigned int chunk =
2480                                 min_t(unsigned int, t.len, sizeof(buf));
2481
2482                         ret =
2483                                 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2484                                                 buf);
2485                         if (ret)
2486                                 return ret;
2487                         if (copy_to_user(useraddr, buf, chunk))
2488                                 return -EFAULT;
2489                         useraddr += chunk;
2490                         t.addr += chunk;
2491                         t.len -= chunk;
2492                 }
2493                 break;
2494         }
2495         case CHELSIO_SET_TRACE_FILTER:{
2496                 struct ch_trace t;
2497                 const struct trace_params *tp;
2498
2499                 if (!capable(CAP_NET_ADMIN))
2500                         return -EPERM;
2501                 if (!offload_running(adapter))
2502                         return -EAGAIN;
2503                 if (copy_from_user(&t, useraddr, sizeof(t)))
2504                         return -EFAULT;
2505                 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2506                         return -EINVAL;
2507
2508                 tp = (const struct trace_params *)&t.sip;
2509                 if (t.config_tx)
2510                         t3_config_trace_filter(adapter, tp, 0,
2511                                                 t.invert_match,
2512                                                 t.trace_tx);
2513                 if (t.config_rx)
2514                         t3_config_trace_filter(adapter, tp, 1,
2515                                                 t.invert_match,
2516                                                 t.trace_rx);
2517                 break;
2518         }
2519         default:
2520                 return -EOPNOTSUPP;
2521         }
2522         return 0;
2523 }
2524
2525 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2526 {
2527         struct mii_ioctl_data *data = if_mii(req);
2528         struct port_info *pi = netdev_priv(dev);
2529         struct adapter *adapter = pi->adapter;
2530
2531         switch (cmd) {
2532         case SIOCGMIIREG:
2533         case SIOCSMIIREG:
2534                 /* Convert phy_id from older PRTAD/DEVAD format */
2535                 if (is_10G(adapter) &&
2536                     !mdio_phy_id_is_c45(data->phy_id) &&
2537                     (data->phy_id & 0x1f00) &&
2538                     !(data->phy_id & 0xe0e0))
2539                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2540                                                        data->phy_id & 0x1f);
2541                 /* FALLTHRU */
2542         case SIOCGMIIPHY:
2543                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2544         case SIOCCHIOCTL:
2545                 return cxgb_extension_ioctl(dev, req->ifr_data);
2546         default:
2547                 return -EOPNOTSUPP;
2548         }
2549 }
2550
2551 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2552 {
2553         struct port_info *pi = netdev_priv(dev);
2554         struct adapter *adapter = pi->adapter;
2555         int ret;
2556
2557         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2558                 return ret;
2559         dev->mtu = new_mtu;
2560         init_port_mtus(adapter);
2561         if (adapter->params.rev == 0 && offload_running(adapter))
2562                 t3_load_mtus(adapter, adapter->params.mtus,
2563                              adapter->params.a_wnd, adapter->params.b_wnd,
2564                              adapter->port[0]->mtu);
2565         return 0;
2566 }
2567
2568 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2569 {
2570         struct port_info *pi = netdev_priv(dev);
2571         struct adapter *adapter = pi->adapter;
2572         struct sockaddr *addr = p;
2573
2574         if (!is_valid_ether_addr(addr->sa_data))
2575                 return -EADDRNOTAVAIL;
2576
2577         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2578         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2579         if (offload_running(adapter))
2580                 write_smt_entry(adapter, pi->port_id);
2581         return 0;
2582 }
2583
2584 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2585         netdev_features_t features)
2586 {
2587         /*
2588          * Since there is no support for separate rx/tx vlan accel
2589          * enable/disable make sure tx flag is always in same state as rx.
2590          */
2591         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2592                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2593         else
2594                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2595
2596         return features;
2597 }
2598
2599 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2600 {
2601         netdev_features_t changed = dev->features ^ features;
2602
2603         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2604                 cxgb_vlan_mode(dev, features);
2605
2606         return 0;
2607 }
2608
2609 #ifdef CONFIG_NET_POLL_CONTROLLER
2610 static void cxgb_netpoll(struct net_device *dev)
2611 {
2612         struct port_info *pi = netdev_priv(dev);
2613         struct adapter *adapter = pi->adapter;
2614         int qidx;
2615
2616         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2617                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2618                 void *source;
2619
2620                 if (adapter->flags & USING_MSIX)
2621                         source = qs;
2622                 else
2623                         source = adapter;
2624
2625                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2626         }
2627 }
2628 #endif
2629
2630 /*
2631  * Periodic accumulation of MAC statistics.
2632  */
2633 static void mac_stats_update(struct adapter *adapter)
2634 {
2635         int i;
2636
2637         for_each_port(adapter, i) {
2638                 struct net_device *dev = adapter->port[i];
2639                 struct port_info *p = netdev_priv(dev);
2640
2641                 if (netif_running(dev)) {
2642                         spin_lock(&adapter->stats_lock);
2643                         t3_mac_update_stats(&p->mac);
2644                         spin_unlock(&adapter->stats_lock);
2645                 }
2646         }
2647 }
2648
2649 static void check_link_status(struct adapter *adapter)
2650 {
2651         int i;
2652
2653         for_each_port(adapter, i) {
2654                 struct net_device *dev = adapter->port[i];
2655                 struct port_info *p = netdev_priv(dev);
2656                 int link_fault;
2657
2658                 spin_lock_irq(&adapter->work_lock);
2659                 link_fault = p->link_fault;
2660                 spin_unlock_irq(&adapter->work_lock);
2661
2662                 if (link_fault) {
2663                         t3_link_fault(adapter, i);
2664                         continue;
2665                 }
2666
2667                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2668                         t3_xgm_intr_disable(adapter, i);
2669                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2670
2671                         t3_link_changed(adapter, i);
2672                         t3_xgm_intr_enable(adapter, i);
2673                 }
2674         }
2675 }
2676
2677 static void check_t3b2_mac(struct adapter *adapter)
2678 {
2679         int i;
2680
2681         if (!rtnl_trylock())    /* synchronize with ifdown */
2682                 return;
2683
2684         for_each_port(adapter, i) {
2685                 struct net_device *dev = adapter->port[i];
2686                 struct port_info *p = netdev_priv(dev);
2687                 int status;
2688
2689                 if (!netif_running(dev))
2690                         continue;
2691
2692                 status = 0;
2693                 if (netif_running(dev) && netif_carrier_ok(dev))
2694                         status = t3b2_mac_watchdog_task(&p->mac);
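                /* Judging by this caller: 1 means the watchdog toggled TX
                 * enable, 2 means the MAC needs the full reset below.
                 */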
2695                 if (status == 1)
2696                         p->mac.stats.num_toggled++;
2697                 else if (status == 2) {
2698                         struct cmac *mac = &p->mac;
2699
2700                         t3_mac_set_mtu(mac, dev->mtu);
2701                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2702                         cxgb_set_rxmode(dev);
2703                         t3_link_start(&p->phy, mac, &p->link_config);
2704                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2705                         t3_port_intr_enable(adapter, p->port_id);
2706                         p->mac.stats.num_resets++;
2707                 }
2708         }
2709         rtnl_unlock();
2710 }
2711
2712
2713 static void t3_adap_check_task(struct work_struct *work)
2714 {
2715         struct adapter *adapter = container_of(work, struct adapter,
2716                                                adap_check_task.work);
2717         const struct adapter_params *p = &adapter->params;
2718         int port;
2719         unsigned int v, status, reset;
2720
2721         adapter->check_task_cnt++;
2722
2723         check_link_status(adapter);
2724
2725         /* Accumulate MAC stats if needed */
2726         if (!p->linkpoll_period ||
2727             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2728             p->stats_update_period) {
2729                 mac_stats_update(adapter);
2730                 adapter->check_task_cnt = 0;
2731         }
2732
2733         if (p->rev == T3_REV_B2)
2734                 check_t3b2_mac(adapter);
2735
2736         /*
2737          * Scan the XGMACs for conditions we want to monitor by periodic
2738          * polling rather than via an interrupt.  Such conditions would
2739          * otherwise flood the system with interrupts, and we only really
2740          * need to know that they are "happening" ...  For each condition
2741          * we count every detection of the condition and reset it for the
2742          * next polling loop.
2743          */
2744         for_each_port(adapter, port) {
2745                 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2746                 u32 cause;
2747
2748                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2749                 reset = 0;
2750                 if (cause & F_RXFIFO_OVERFLOW) {
2751                         mac->stats.rx_fifo_ovfl++;
2752                         reset |= F_RXFIFO_OVERFLOW;
2753                 }
2754
2755                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2756         }
2757
2758         /*
2759          * We do the same as above for FL_EMPTY interrupts.
2760          */
2761         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2762         reset = 0;
2763
2764         if (status & F_FLEMPTY) {
2765                 struct sge_qset *qs = &adapter->sge.qs[0];
2766                 int i = 0;
2767
2768                 reset |= F_FLEMPTY;
2769
2770                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2771                     0xffff;
2772
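                /* Each qset contributes two FLnEMPTY status bits, one per
                 * free list: alternate fl[0]/fl[1] and step to the next
                 * qset every other bit.
                 */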
2773                 while (v) {
2774                         qs->fl[i].empty += (v & 1);
2775                         if (i)
2776                                 qs++;
2777                         i ^= 1;
2778                         v >>= 1;
2779                 }
2780         }
2781
2782         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2783
2784         /* Schedule the next check update if any port is active. */
2785         spin_lock_irq(&adapter->work_lock);
2786         if (adapter->open_device_map & PORT_MASK)
2787                 schedule_chk_task(adapter);
2788         spin_unlock_irq(&adapter->work_lock);
2789 }
2790
2791 static void db_full_task(struct work_struct *work)
2792 {
2793         struct adapter *adapter = container_of(work, struct adapter,
2794                                                db_full_task);
2795
2796         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2797 }
2798
2799 static void db_empty_task(struct work_struct *work)
2800 {
2801         struct adapter *adapter = container_of(work, struct adapter,
2802                                                db_empty_task);
2803
2804         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2805 }
2806
2807 static void db_drop_task(struct work_struct *work)
2808 {
2809         struct adapter *adapter = container_of(work, struct adapter,
2810                                                db_drop_task);
2811         unsigned long delay = 1000;
2812         unsigned short r;
2813
2814         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2815
2816         /*
2817          * Sleep a while before ringing the driver qset dbs.
2818          * The delay is between 1000-2023 usecs.
2819          * The delay is between 1000 and 2023 usecs.
2820         get_random_bytes(&r, 2);
2821         delay += r & 1023;
2822         set_current_state(TASK_UNINTERRUPTIBLE);
2823         schedule_timeout(usecs_to_jiffies(delay));
2824         ring_dbs(adapter);
2825 }
2826
2827 /*
2828  * Processes external (PHY) interrupts in process context.
2829  */
2830 static void ext_intr_task(struct work_struct *work)
2831 {
2832         struct adapter *adapter = container_of(work, struct adapter,
2833                                                ext_intr_handler_task);
2834         int i;
2835
2836         /* Disable link fault interrupts */
2837         for_each_port(adapter, i) {
2838                 struct net_device *dev = adapter->port[i];
2839                 struct port_info *p = netdev_priv(dev);
2840
2841                 t3_xgm_intr_disable(adapter, i);
2842                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2843         }
2844
2845         /* Re-enable link fault interrupts */
2846         t3_phy_intr_handler(adapter);
2847
2848         for_each_port(adapter, i)
2849                 t3_xgm_intr_enable(adapter, i);
2850
2851         /* Now reenable external interrupts */
2852         spin_lock_irq(&adapter->work_lock);
2853         if (adapter->slow_intr_mask) {
2854                 adapter->slow_intr_mask |= F_T3DBG;
2855                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2856                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2857                              adapter->slow_intr_mask);
2858         }
2859         spin_unlock_irq(&adapter->work_lock);
2860 }
2861
2862 /*
2863  * Interrupt-context handler for external (PHY) interrupts.
2864  */
2865 void t3_os_ext_intr_handler(struct adapter *adapter)
2866 {
2867         /*
2868          * Schedule a task to handle external interrupts as they may be slow
2869          * and we use a mutex to protect MDIO registers.  We disable PHY
2870          * interrupts in the meantime and let the task reenable them when
2871          * it's done.
2872          */
2873         spin_lock(&adapter->work_lock);
2874         if (adapter->slow_intr_mask) {
2875                 adapter->slow_intr_mask &= ~F_T3DBG;
2876                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2877                              adapter->slow_intr_mask);
2878                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2879         }
2880         spin_unlock(&adapter->work_lock);
2881 }
2882
2883 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2884 {
2885         struct net_device *netdev = adapter->port[port_id];
2886         struct port_info *pi = netdev_priv(netdev);
2887
2888         spin_lock(&adapter->work_lock);
2889         pi->link_fault = 1;
2890         spin_unlock(&adapter->work_lock);
2891 }
2892
2893 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2894 {
2895         int i, ret = 0;
2896
2897         if (is_offload(adapter) &&
2898             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2899                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2900                 offload_close(&adapter->tdev);
2901         }
2902
2903         /* Stop all ports */
2904         for_each_port(adapter, i) {
2905                 struct net_device *netdev = adapter->port[i];
2906
2907                 if (netif_running(netdev))
2908                         __cxgb_close(netdev, on_wq);
2909         }
2910
2911         /* Stop SGE timers */
2912         t3_stop_sge_timers(adapter);
2913
2914         adapter->flags &= ~FULL_INIT_DONE;
2915
2916         if (reset)
2917                 ret = t3_reset_adapter(adapter);
2918
2919         pci_disable_device(adapter->pdev);
2920
2921         return ret;
2922 }
2923
2924 static int t3_reenable_adapter(struct adapter *adapter)
2925 {
2926         if (pci_enable_device(adapter->pdev)) {
2927                 dev_err(&adapter->pdev->dev,
2928                         "Cannot re-enable PCI device after reset.\n");
2929                 goto err;
2930         }
2931         pci_set_master(adapter->pdev);
2932         pci_restore_state(adapter->pdev);
2933         pci_save_state(adapter->pdev);
2934
2935         /* Free sge resources */
2936         t3_free_sge_resources(adapter);
2937
2938         if (t3_replay_prep_adapter(adapter))
2939                 goto err;
2940
2941         return 0;
2942 err:
2943         return -1;
2944 }
2945
2946 static void t3_resume_ports(struct adapter *adapter)
2947 {
2948         int i;
2949
2950         /* Restart the ports */
2951         for_each_port(adapter, i) {
2952                 struct net_device *netdev = adapter->port[i];
2953
2954                 if (netif_running(netdev)) {
2955                         if (cxgb_open(netdev)) {
2956                                 dev_err(&adapter->pdev->dev,
2957                                         "can't bring device back up"
2958                                         " after reset\n");
2959                                 continue;
2960                         }
2961                 }
2962         }
2963
2964         if (is_offload(adapter) && !ofld_disable)
2965                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2966 }
2967
/*
 * Process a fatal error: bring the ports down, reset the chip, then
 * bring the ports back up.
 */
2972 static void fatal_error_task(struct work_struct *work)
2973 {
2974         struct adapter *adapter = container_of(work, struct adapter,
2975                                                fatal_error_handler_task);
2976         int err = 0;
2977
2978         rtnl_lock();
2979         err = t3_adapter_error(adapter, 1, 1);
2980         if (!err)
2981                 err = t3_reenable_adapter(adapter);
2982         if (!err)
2983                 t3_resume_ports(adapter);
2984
2985         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2986         rtnl_unlock();
2987 }
2988
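/*
 * Handle a fatal hardware error: stop the SGE and both MACs, disable
 * interrupts, and defer the actual recovery to fatal_error_task().
 * The firmware status words read from the CIM control block are logged
 * to aid debugging.
 */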
2989 void t3_fatal_err(struct adapter *adapter)
2990 {
2991         unsigned int fw_status[4];
2992
2993         if (adapter->flags & FULL_INIT_DONE) {
2994                 t3_sge_stop(adapter);
2995                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2996                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2997                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2998                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2999
3000                 spin_lock(&adapter->work_lock);
3001                 t3_intr_disable(adapter);
3002                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3003                 spin_unlock(&adapter->work_lock);
3004         }
3005         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3006         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3007                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3008                          fw_status[0], fw_status[1],
3009                          fw_status[2], fw_status[3]);
3010 }
3011
3012 /**
3013  * t3_io_error_detected - called when PCI error is detected
3014  * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
3016  *
3017  * This function is called after a PCI bus error affecting
3018  * this device has been detected.
3019  */
3020 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3021                                              pci_channel_state_t state)
3022 {
3023         struct adapter *adapter = pci_get_drvdata(pdev);
3024
3025         if (state == pci_channel_io_perm_failure)
3026                 return PCI_ERS_RESULT_DISCONNECT;
3027
3028         t3_adapter_error(adapter, 0, 0);
3029
3030         /* Request a slot reset. */
3031         return PCI_ERS_RESULT_NEED_RESET;
3032 }
3033
3034 /**
 * t3_io_slot_reset - called after the PCI bus has been reset.
3036  * @pdev: Pointer to PCI device
3037  *
 * Restart the card from scratch, as if from a cold boot.
3039  */
3040 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3041 {
3042         struct adapter *adapter = pci_get_drvdata(pdev);
3043
3044         if (!t3_reenable_adapter(adapter))
3045                 return PCI_ERS_RESULT_RECOVERED;
3046
3047         return PCI_ERS_RESULT_DISCONNECT;
3048 }
3049
3050 /**
3051  * t3_io_resume - called when traffic can start flowing again.
3052  * @pdev: Pointer to PCI device
3053  *
3054  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
3056  */
3057 static void t3_io_resume(struct pci_dev *pdev)
3058 {
3059         struct adapter *adapter = pci_get_drvdata(pdev);
3060
3061         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3062                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3063
3064         rtnl_lock();
3065         t3_resume_ports(adapter);
3066         rtnl_unlock();
3067 }
3068
3069 static const struct pci_error_handlers t3_err_handler = {
3070         .error_detected = t3_io_error_detected,
3071         .slot_reset = t3_io_slot_reset,
3072         .resume = t3_io_resume,
3073 };
3074
3075 /*
3076  * Set the number of qsets based on the number of CPUs and the number of ports,
3077  * not to exceed the number of available qsets, assuming there are enough qsets
3078  * per port in HW.
3079  */
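/*
 * Worked example, assuming SGE_QSETS is 8: a two-port rev > 0 adapter
 * that was granted 9 MSI-X vectors starts with nqsets = 8; since
 * 2 * 8 > SGE_QSETS, nqsets is halved to 4 per port, and it is then
 * clamped to the default RSS queue count if that is smaller.
 */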
3080 static void set_nqsets(struct adapter *adap)
3081 {
3082         int i, j = 0;
3083         int num_cpus = netif_get_num_default_rss_queues();
3084         int hwports = adap->params.nports;
3085         int nqsets = adap->msix_nvectors - 1;
3086
3087         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3088                 if (hwports == 2 &&
3089                     (hwports * nqsets > SGE_QSETS ||
3090                      num_cpus >= nqsets / hwports))
3091                         nqsets /= hwports;
3092                 if (nqsets > num_cpus)
3093                         nqsets = num_cpus;
3094                 if (nqsets < 1 || hwports == 4)
3095                         nqsets = 1;
	} else {
		nqsets = 1;
	}
3098
3099         for_each_port(adap, i) {
3100                 struct port_info *pi = adap2pinfo(adap, i);
3101
3102                 pi->first_qset = j;
3103                 pi->nqsets = nqsets;
3104                 j = pi->first_qset + nqsets;
3105
3106                 dev_info(&adap->pdev->dev,
3107                          "Port %d using %d queue sets.\n", i, nqsets);
3108         }
3109 }
3110
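/*
 * Request one MSI-X vector per SGE queue set plus one for slow-path
 * interrupts, accepting as few as nports + 1 vectors.
 */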
3111 static int cxgb_enable_msix(struct adapter *adap)
3112 {
3113         struct msix_entry entries[SGE_QSETS + 1];
3114         int vectors;
3115         int i;
3116
3117         vectors = ARRAY_SIZE(entries);
3118         for (i = 0; i < vectors; ++i)
3119                 entries[i].entry = i;
3120
3121         vectors = pci_enable_msix_range(adap->pdev, entries,
3122                                         adap->params.nports + 1, vectors);
3123         if (vectors < 0)
3124                 return vectors;
3125
3126         for (i = 0; i < vectors; ++i)
3127                 adap->msix_info[i].vec = entries[i].vector;
3128         adap->msix_nvectors = vectors;
3129
3130         return 0;
3131 }
3132
3133 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3134 {
3135         static const char *pci_variant[] = {
3136                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3137         };
3138
3139         int i;
3140         char buf[80];
3141
3142         if (is_pcie(adap))
3143                 snprintf(buf, sizeof(buf), "%s x%d",
3144                          pci_variant[adap->params.pci.variant],
3145                          adap->params.pci.width);
3146         else
3147                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3148                          pci_variant[adap->params.pci.variant],
3149                          adap->params.pci.speed, adap->params.pci.width);
3150
3151         for_each_port(adap, i) {
3152                 struct net_device *dev = adap->port[i];
3153                 const struct port_info *pi = netdev_priv(dev);
3154
3155                 if (!test_bit(i, &adap->registered_device_map))
3156                         continue;
3157                 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3158                             ai->desc, pi->phy.desc,
3159                             is_offload(adap) ? "R" : "", adap->params.rev, buf,
3160                             (adap->flags & USING_MSIX) ? " MSI-X" :
3161                             (adap->flags & USING_MSI) ? " MSI" : "");
3162                 if (adap->name == dev->name && adap->params.vpd.mclk)
3163                         pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
3168         }
3169 }
3170
3171 static const struct net_device_ops cxgb_netdev_ops = {
3172         .ndo_open               = cxgb_open,
3173         .ndo_stop               = cxgb_close,
3174         .ndo_start_xmit         = t3_eth_xmit,
3175         .ndo_get_stats          = cxgb_get_stats,
3176         .ndo_validate_addr      = eth_validate_addr,
3177         .ndo_set_rx_mode        = cxgb_set_rxmode,
3178         .ndo_do_ioctl           = cxgb_ioctl,
3179         .ndo_change_mtu         = cxgb_change_mtu,
3180         .ndo_set_mac_address    = cxgb_set_mac_addr,
3181         .ndo_fix_features       = cxgb_fix_features,
3182         .ndo_set_features       = cxgb_set_features,
3183 #ifdef CONFIG_NET_POLL_CONTROLLER
3184         .ndo_poll_controller    = cxgb_netpoll,
3185 #endif
3186 };
3187
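/*
 * Derive the port's iSCSI MAC address from its Ethernet address by
 * setting the top bit of octet 3, giving iSCSI traffic a distinct MAC
 * on the same port.
 */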
3188 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3189 {
3190         struct port_info *pi = netdev_priv(dev);
3191
3192         memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3193         pi->iscsic.mac_addr[3] |= 0x80;
3194 }
3195
3196 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3197 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3198                         NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3199 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3200 {
3201         int i, err, pci_using_dac = 0;
3202         resource_size_t mmio_start, mmio_len;
3203         const struct adapter_info *ai;
3204         struct adapter *adapter = NULL;
3205         struct port_info *pi;
3206
3207         pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3208
3209         if (!cxgb3_wq) {
3210                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3211                 if (!cxgb3_wq) {
3212                         pr_err("cannot initialize work queue\n");
3213                         return -ENOMEM;
3214                 }
3215         }
3216
3217         err = pci_enable_device(pdev);
3218         if (err) {
3219                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3220                 goto out;
3221         }
3222
3223         err = pci_request_regions(pdev, DRV_NAME);
3224         if (err) {
3225                 /* Just info, some other driver may have claimed the device. */
3226                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3227                 goto out_disable_device;
3228         }
3229
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev,
				"unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_release_regions;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_release_regions;
		}
	}
3242
3243         pci_set_master(pdev);
3244         pci_save_state(pdev);
3245
3246         mmio_start = pci_resource_start(pdev, 0);
3247         mmio_len = pci_resource_len(pdev, 0);
3248         ai = t3_get_adapter_info(ent->driver_data);
3249
3250         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3251         if (!adapter) {
3252                 err = -ENOMEM;
3253                 goto out_release_regions;
3254         }
3255
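	/*
	 * Pre-allocate an skb sized for a CPL set_tcb_field message so that
	 * critical control messages can still be sent when regular
	 * allocation fails under memory pressure.
	 */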
3256         adapter->nofail_skb =
3257                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3258         if (!adapter->nofail_skb) {
3259                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3260                 err = -ENOMEM;
3261                 goto out_free_adapter;
3262         }
3263
3264         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3265         if (!adapter->regs) {
3266                 dev_err(&pdev->dev, "cannot map device registers\n");
3267                 err = -ENOMEM;
3268                 goto out_free_adapter_nofail;
3269         }
3270
3271         adapter->pdev = pdev;
3272         adapter->name = pci_name(pdev);
3273         adapter->msg_enable = dflt_msg_enable;
3274         adapter->mmio_len = mmio_len;
3275
3276         mutex_init(&adapter->mdio_lock);
3277         spin_lock_init(&adapter->work_lock);
3278         spin_lock_init(&adapter->stats_lock);
3279
3280         INIT_LIST_HEAD(&adapter->adapter_list);
3281         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3282         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3283
3284         INIT_WORK(&adapter->db_full_task, db_full_task);
3285         INIT_WORK(&adapter->db_empty_task, db_empty_task);
3286         INIT_WORK(&adapter->db_drop_task, db_drop_task);
3287
3288         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3289
3290         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3291                 struct net_device *netdev;
3292
3293                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3294                 if (!netdev) {
3295                         err = -ENOMEM;
3296                         goto out_free_dev;
3297                 }
3298
3299                 SET_NETDEV_DEV(netdev, &pdev->dev);
3300
3301                 adapter->port[i] = netdev;
3302                 pi = netdev_priv(netdev);
3303                 pi->adapter = adapter;
3304                 pi->port_id = i;
3305                 netif_carrier_off(netdev);
3306                 netdev->irq = pdev->irq;
3307                 netdev->mem_start = mmio_start;
3308                 netdev->mem_end = mmio_start + mmio_len - 1;
3309                 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3310                         NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3311                 netdev->features |= netdev->hw_features |
3312                                     NETIF_F_HW_VLAN_CTAG_TX;
3313                 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3314                 if (pci_using_dac)
3315                         netdev->features |= NETIF_F_HIGHDMA;
3316
3317                 netdev->netdev_ops = &cxgb_netdev_ops;
3318                 netdev->ethtool_ops = &cxgb_ethtool_ops;
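		/*
		 * 81 bytes is the driver's long-standing minimum MTU,
		 * historically chosen to accommodate SACK.
		 */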
3319                 netdev->min_mtu = 81;
3320                 netdev->max_mtu = ETH_MAX_MTU;
3321         }
3322
3323         pci_set_drvdata(pdev, adapter);
3324         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3325                 err = -ENODEV;
3326                 goto out_free_dev;
3327         }
3328
3329         /*
3330          * The card is now ready to go.  If any errors occur during device
3331          * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However, we must
	 * register at least one net device.
3334          */
3335         for_each_port(adapter, i) {
3336                 err = register_netdev(adapter->port[i]);
3337                 if (err)
3338                         dev_warn(&pdev->dev,
3339                                  "cannot register net device %s, skipping\n",
3340                                  adapter->port[i]->name);
3341                 else {
3342                         /*
3343                          * Change the name we use for messages to the name of
3344                          * the first successfully registered interface.
3345                          */
3346                         if (!adapter->registered_device_map)
3347                                 adapter->name = adapter->port[i]->name;
3348
3349                         __set_bit(i, &adapter->registered_device_map);
3350                 }
3351         }
3352         if (!adapter->registered_device_map) {
3353                 dev_err(&pdev->dev, "could not register any net devices\n");
3354                 goto out_free_dev;
3355         }
3356
3357         for_each_port(adapter, i)
3358                 cxgb3_init_iscsi_mac(adapter->port[i]);
3359
	/* The driver is ready; reflect that on the LEDs */
3361         t3_led_ready(adapter);
3362
3363         if (is_offload(adapter)) {
3364                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3365                 cxgb3_adapter_ofld(adapter);
3366         }
3367
	/*
	 * Pick the interrupt mechanism: try MSI-X first (msi > 1), fall
	 * back to MSI (msi > 0), and otherwise use legacy INTx.
	 */
3369         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3370                 adapter->flags |= USING_MSIX;
3371         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3372                 adapter->flags |= USING_MSI;
3373
3374         set_nqsets(adapter);
3375
	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err)
		dev_warn(&pdev->dev, "cannot create sysfs attribute group\n");
3378
3379         print_port_info(adapter, ai);
3380         return 0;
3381
3382 out_free_dev:
3383         iounmap(adapter->regs);
3384         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3385                 if (adapter->port[i])
3386                         free_netdev(adapter->port[i]);
3387
3388 out_free_adapter_nofail:
3389         kfree_skb(adapter->nofail_skb);
3390
3391 out_free_adapter:
3392         kfree(adapter);
3393
3394 out_release_regions:
3395         pci_release_regions(pdev);
3396 out_disable_device:
3397         pci_disable_device(pdev);
3398 out:
3399         return err;
3400 }
3401
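/*
 * PCI remove: tear down in roughly the reverse order of init_one().
 * Stop the SGE, remove the sysfs group, detach the offload device,
 * unregister the net devices, release SGE and interrupt resources,
 * then unmap the registers and free the net devices and the adapter.
 */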
3402 static void remove_one(struct pci_dev *pdev)
3403 {
3404         struct adapter *adapter = pci_get_drvdata(pdev);
3405
3406         if (adapter) {
3407                 int i;
3408
3409                 t3_sge_stop(adapter);
3410                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3411                                    &cxgb3_attr_group);
3412
3413                 if (is_offload(adapter)) {
3414                         cxgb3_adapter_unofld(adapter);
3415                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3416                                      &adapter->open_device_map))
3417                                 offload_close(&adapter->tdev);
3418                 }
3419
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);
3423
3424                 t3_stop_sge_timers(adapter);
3425                 t3_free_sge_resources(adapter);
3426                 cxgb_disable_msi(adapter);
3427
3428                 for_each_port(adapter, i)
3429                         if (adapter->port[i])
3430                                 free_netdev(adapter->port[i]);
3431
3432                 iounmap(adapter->regs);
		kfree_skb(adapter->nofail_skb);	/* kfree_skb() is NULL-safe */
3435                 kfree(adapter);
3436                 pci_release_regions(pdev);
3437                 pci_disable_device(pdev);
3438         }
3439 }
3440
3441 static struct pci_driver driver = {
3442         .name = DRV_NAME,
3443         .id_table = cxgb3_pci_tbl,
3444         .probe = init_one,
3445         .remove = remove_one,
3446         .err_handler = &t3_err_handler,
3447 };
3448
3449 static int __init cxgb3_init_module(void)
3450 {
	cxgb3_offload_init();

	return pci_register_driver(&driver);
3457 }
3458
3459 static void __exit cxgb3_cleanup_module(void)
3460 {
3461         pci_unregister_driver(&driver);
3462         if (cxgb3_wq)
3463                 destroy_workqueue(cxgb3_wq);
3464 }
3465
3466 module_init(cxgb3_init_module);
3467 module_exit(cxgb3_cleanup_module);