GNU Linux-libre 4.9-gnu1
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | \
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
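
/*
 * Example (illustrative, not part of the original source): to fall back
 * to MSI or legacy pin interrupts on a platform with broken MSI-X
 * support, load the module with
 *
 *     modprobe cxgb3 msi=1
 *
 * Because the parameter is created with mode 0644 it is also visible at
 * /sys/module/cxgb3/parameters/msi; writes there only affect adapters
 * probed afterwards.
 */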

/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose port's link settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

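/*
 * While a link fault is pending the MAC cannot transmit, so frames queued
 * in the XGMAC TX FIFO would otherwise sit there indefinitely.  Setting
 * ENDROPPKT makes the MAC drop egress frames so the FIFO can drain; the
 * RX/TX enable writes then restart the MAC state machines.
 */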
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

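/**
 *      t3_os_link_fault - handle a link fault notification
 *      @adap: the adapter associated with the fault
 *      @port_id: the port index affected by the fault
 *      @state: the new link state (nonzero once the fault has cleared)
 *
 *      When the fault clears we stop draining the TX FIFO, acknowledge and
 *      re-enable the XGMAC interrupt, and re-enable transmission; while
 *      the fault is active we drop the carrier and drain the TX FIFO so
 *      it cannot fill up with undeliverable frames.
 */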
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter whose PHY reported the module change
 *      @port_id: the port index whose PHY module was removed or inserted
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

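/*
 * Request one data interrupt per queue set.  MSI-X vector 0 is reserved
 * for asynchronous events, so queue set i uses vector i + 1.  On failure
 * any vectors already requested are released before returning the error.
 */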
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

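/*
 * Wait until @n replies to management requests have arrived on queue set
 * 0's response queue beyond @init_cnt, polling its offload packet counter
 * every 10ms and giving up after 10 attempts (roughly 100ms).
 */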
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

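/*
 * Initialize the parity-protected TP memories (the 16-entry SMT, the
 * 2048-entry L2T, and the 2048-entry routing table) by writing every
 * entry once through the management TX queue, then send a final
 * CPL_SET_TCB_FIELD and wait for all 16 + 2048 + 2048 + 1 replies.
 * Called from cxgb_up() on rev C and later adapters so that later reads
 * do not raise spurious parity errors.
 */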
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        /* replenish the pre-allocated reserve skb if we just consumed it */
        if (skb == adap->nofail_skb)
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

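/*
 * Ring the doorbell of every active egress context so the SGE re-examines
 * all TX queues.
 */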
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

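/*
 * Each TX scheduler is programmed with a pair (bpt, cpt): bpt bytes may
 * be sent every cpt core-clock ticks.  Two schedulers share one 32-bit
 * rate-limit register, with the odd-numbered one in the upper 16 bits.
 * Assuming vpd.cclk is stored in kHz (the * 1000 scaling below suggests
 * so), cclk * 1000 / cpt gives ticks per second, the rate is bpt times
 * that in bytes/sec, and dividing by 125 (= 1000 / 8) converts to Kbps.
 */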
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

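/*
 * Program a port's source MAC table entry with both the LAN and iSCSI
 * MAC addresses so offloaded connections transmit with the correct
 * source MAC.
 */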
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

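/*
 * Bind every queue set to its port on TX scheduler 1 by sending one
 * FW_WROPCODE_MNGT work request per queue set, with no rate limits
 * (min = max = -1).  The last error encountered, if any, is returned.
 */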
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

/*(DEBLOBBED)*/
#define FW_FNAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
#define TPSRAM_NAME "/*(DEBLOBBED)*/"
#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

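/*
 * Load an EDC (electronic dispersion compensation) firmware image for a
 * PHY and unpack it into the PHY's 16-bit register cache.  The image ends
 * with a 32-bit checksum; the sum of all its big-endian 32-bit words must
 * equal 0xffffffff for the image to be accepted.
 */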
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check the size, taking the trailing checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = reject_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = reject_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: the adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

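/*
 * Schedule the periodic adapter check.  linkpoll_period is in tenths of
 * a second; when it is zero the task runs at the statistics update
 * period instead, and a zero timeout disables the task entirely.
 */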
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
        if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
1634                          G_FW_VERSION_MICRO(fw_vers),
1635                          G_TP_VERSION_MAJOR(tp_vers),
1636                          G_TP_VERSION_MINOR(tp_vers),
1637                          G_TP_VERSION_MICRO(tp_vers));
1638 }
1639
1640 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1641 {
1642         if (stringset == ETH_SS_STATS)
1643                 memcpy(data, stats_strings, sizeof(stats_strings));
1644 }
1645
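/* Sum one per-qset SGE statistic over all qsets owned by a port. */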
1646 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1647                                             struct port_info *p, int idx)
1648 {
1649         int i;
1650         unsigned long tot = 0;
1651
1652         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1653                 tot += adapter->sge.qs[i].port_stats[idx];
1654         return tot;
1655 }
1656
1657 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1658                       u64 *data)
1659 {
1660         struct port_info *pi = netdev_priv(dev);
1661         struct adapter *adapter = pi->adapter;
1662         const struct mac_stats *s;
1663
1664         spin_lock(&adapter->stats_lock);
1665         s = t3_mac_update_stats(&pi->mac);
1666         spin_unlock(&adapter->stats_lock);
1667
1668         *data++ = s->tx_octets;
1669         *data++ = s->tx_frames;
1670         *data++ = s->tx_mcast_frames;
1671         *data++ = s->tx_bcast_frames;
1672         *data++ = s->tx_pause;
1673         *data++ = s->tx_underrun;
1674         *data++ = s->tx_fifo_urun;
1675
1676         *data++ = s->tx_frames_64;
1677         *data++ = s->tx_frames_65_127;
1678         *data++ = s->tx_frames_128_255;
1679         *data++ = s->tx_frames_256_511;
1680         *data++ = s->tx_frames_512_1023;
1681         *data++ = s->tx_frames_1024_1518;
1682         *data++ = s->tx_frames_1519_max;
1683
1684         *data++ = s->rx_octets;
1685         *data++ = s->rx_frames;
1686         *data++ = s->rx_mcast_frames;
1687         *data++ = s->rx_bcast_frames;
1688         *data++ = s->rx_pause;
1689         *data++ = s->rx_fcs_errs;
1690         *data++ = s->rx_symbol_errs;
1691         *data++ = s->rx_short;
1692         *data++ = s->rx_jabber;
1693         *data++ = s->rx_too_long;
1694         *data++ = s->rx_fifo_ovfl;
1695
1696         *data++ = s->rx_frames_64;
1697         *data++ = s->rx_frames_65_127;
1698         *data++ = s->rx_frames_128_255;
1699         *data++ = s->rx_frames_256_511;
1700         *data++ = s->rx_frames_512_1023;
1701         *data++ = s->rx_frames_1024_1518;
1702         *data++ = s->rx_frames_1519_max;
1703
1704         *data++ = pi->phy.fifo_errors;
1705
1706         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1707         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1708         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1709         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1710         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1711         *data++ = 0;
1712         *data++ = 0;
1713         *data++ = 0;
1714         *data++ = s->rx_cong_drops;
1715
1716         *data++ = s->num_toggled;
1717         *data++ = s->num_resets;
1718
1719         *data++ = s->link_faults;
1720 }
1721
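/*
 * Copy the contiguous register range [start, end] into the dump buffer,
 * placing each 32-bit register at the offset given by its address.
 */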
1722 static inline void reg_block_dump(struct adapter *ap, void *buf,
1723                                   unsigned int start, unsigned int end)
1724 {
1725         u32 *p = buf + start;
1726
1727         for (; start <= end; start += sizeof(u32))
1728                 *p++ = t3_read_reg(ap, start);
1729 }
1730
1731 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1732                      void *buf)
1733 {
1734         struct port_info *pi = netdev_priv(dev);
1735         struct adapter *ap = pi->adapter;
1736
1737         /*
1738          * Version scheme:
1739          * bits 0..9: chip version
1740          * bits 10..15: chip revision
1741          * bit 31: set for PCIe cards
1742          */
1743         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1744
1745         /*
1746          * We skip the MAC statistics registers because they are clear-on-read.
1747          * Also reading multi-register stats would need to synchronize with the
1748          * periodic mac stats accumulation.  Hard to justify the complexity.
1749          */
1750         memset(buf, 0, T3_REGMAP_SIZE);
1751         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1752         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1753         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1754         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1755         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1756         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1757                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1758         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1759                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1760 }
1761
1762 static int restart_autoneg(struct net_device *dev)
1763 {
1764         struct port_info *p = netdev_priv(dev);
1765
1766         if (!netif_running(dev))
1767                 return -EAGAIN;
1768         if (p->link_config.autoneg != AUTONEG_ENABLE)
1769                 return -EINVAL;
1770         p->phy.ops->autoneg_restart(&p->phy);
1771         return 0;
1772 }
1773
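/*
 * ethtool LED identify handler: the core blinks the LED for us at 1 Hz
 * while we drive it through the GPIO0 output.
 */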
1774 static int set_phys_id(struct net_device *dev,
1775                        enum ethtool_phys_id_state state)
1776 {
1777         struct port_info *pi = netdev_priv(dev);
1778         struct adapter *adapter = pi->adapter;
1779
1780         switch (state) {
1781         case ETHTOOL_ID_ACTIVE:
1782                 return 1;       /* cycle on/off once per second */
1783
1784         case ETHTOOL_ID_OFF:
1785                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1786                 break;
1787
1788         case ETHTOOL_ID_ON:
1789         case ETHTOOL_ID_INACTIVE:
1790                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1791                          F_GPIO0_OUT_VAL);
1792         }
1793
1794         return 0;
1795 }
1796
1797 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1798 {
1799         struct port_info *p = netdev_priv(dev);
1800
1801         cmd->supported = p->link_config.supported;
1802         cmd->advertising = p->link_config.advertising;
1803
1804         if (netif_carrier_ok(dev)) {
1805                 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1806                 cmd->duplex = p->link_config.duplex;
1807         } else {
1808                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1809                 cmd->duplex = DUPLEX_UNKNOWN;
1810         }
1811
1812         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1813         cmd->phy_address = p->phy.mdio.prtad;
1814         cmd->transceiver = XCVR_EXTERNAL;
1815         cmd->autoneg = p->link_config.autoneg;
1816         cmd->maxtxpkt = 0;
1817         cmd->maxrxpkt = 0;
1818         return 0;
1819 }
1820
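/*
 * Map an ethtool speed/duplex pair to the matching SUPPORTED_* bit,
 * returning 0 for unsupported combinations such as 10Gb/s half duplex.
 */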
1821 static int speed_duplex_to_caps(int speed, int duplex)
1822 {
1823         int cap = 0;
1824
1825         switch (speed) {
1826         case SPEED_10:
1827                 if (duplex == DUPLEX_FULL)
1828                         cap = SUPPORTED_10baseT_Full;
1829                 else
1830                         cap = SUPPORTED_10baseT_Half;
1831                 break;
1832         case SPEED_100:
1833                 if (duplex == DUPLEX_FULL)
1834                         cap = SUPPORTED_100baseT_Full;
1835                 else
1836                         cap = SUPPORTED_100baseT_Half;
1837                 break;
1838         case SPEED_1000:
1839                 if (duplex == DUPLEX_FULL)
1840                         cap = SUPPORTED_1000baseT_Full;
1841                 else
1842                         cap = SUPPORTED_1000baseT_Half;
1843                 break;
1844         case SPEED_10000:
1845                 if (duplex == DUPLEX_FULL)
1846                         cap = SUPPORTED_10000baseT_Full;
1847         }
1848         return cap;
1849 }
1850
1851 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1852                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1853                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1854                       ADVERTISED_10000baseT_Full)
1855
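/*
 * ethtool set_settings handler.  Fixed-speed PHYs accept only their one
 * speed/duplex setting; with autoneg disabled, 1Gb/s is also rejected
 * since 1000BASE-T requires autonegotiation.
 */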
1856 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1857 {
1858         struct port_info *p = netdev_priv(dev);
1859         struct link_config *lc = &p->link_config;
1860
1861         if (!(lc->supported & SUPPORTED_Autoneg)) {
1862                 /*
1863                  * PHY offers a single speed/duplex.  See if that's what's
1864                  * being requested.
1865                  */
1866                 if (cmd->autoneg == AUTONEG_DISABLE) {
1867                         u32 speed = ethtool_cmd_speed(cmd);
1868                         int cap = speed_duplex_to_caps(speed, cmd->duplex);
1869                         if (lc->supported & cap)
1870                                 return 0;
1871                 }
1872                 return -EINVAL;
1873         }
1874
1875         if (cmd->autoneg == AUTONEG_DISABLE) {
1876                 u32 speed = ethtool_cmd_speed(cmd);
1877                 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1878
1879                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1880                         return -EINVAL;
1881                 lc->requested_speed = speed;
1882                 lc->requested_duplex = cmd->duplex;
1883                 lc->advertising = 0;
1884         } else {
1885                 cmd->advertising &= ADVERTISED_MASK;
1886                 cmd->advertising &= lc->supported;
1887                 if (!cmd->advertising)
1888                         return -EINVAL;
1889                 lc->requested_speed = SPEED_INVALID;
1890                 lc->requested_duplex = DUPLEX_INVALID;
1891                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1892         }
1893         lc->autoneg = cmd->autoneg;
1894         if (netif_running(dev))
1895                 t3_link_start(&p->phy, &p->mac, lc);
1896         return 0;
1897 }
1898
1899 static void get_pauseparam(struct net_device *dev,
1900                            struct ethtool_pauseparam *epause)
1901 {
1902         struct port_info *p = netdev_priv(dev);
1903
1904         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1905         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1906         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1907 }
1908
1909 static int set_pauseparam(struct net_device *dev,
1910                           struct ethtool_pauseparam *epause)
1911 {
1912         struct port_info *p = netdev_priv(dev);
1913         struct link_config *lc = &p->link_config;
1914
1915         if (epause->autoneg == AUTONEG_DISABLE)
1916                 lc->requested_fc = 0;
1917         else if (lc->supported & SUPPORTED_Autoneg)
1918                 lc->requested_fc = PAUSE_AUTONEG;
1919         else
1920                 return -EINVAL;
1921
1922         if (epause->rx_pause)
1923                 lc->requested_fc |= PAUSE_RX;
1924         if (epause->tx_pause)
1925                 lc->requested_fc |= PAUSE_TX;
1926         if (lc->autoneg == AUTONEG_ENABLE) {
1927                 if (netif_running(dev))
1928                         t3_link_start(&p->phy, &p->mac, lc);
1929         } else {
1930                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1931                 if (netif_running(dev))
1932                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1933         }
1934         return 0;
1935 }
1936
1937 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1938 {
1939         struct port_info *pi = netdev_priv(dev);
1940         struct adapter *adapter = pi->adapter;
1941         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1942
1943         e->rx_max_pending = MAX_RX_BUFFERS;
1944         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1945         e->tx_max_pending = MAX_TXQ_ENTRIES;
1946
1947         e->rx_pending = q->fl_size;
1948         e->rx_mini_pending = q->rspq_size;
1949         e->rx_jumbo_pending = q->jumbo_size;
1950         e->tx_pending = q->txq_size[0];
1951 }
1952
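/*
 * ethtool set_ringparam handler: validate the requested ring sizes and
 * apply them to all of the port's qsets.  Rings cannot be resized once
 * the adapter is fully initialized, hence the -EBUSY.
 */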
1953 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1954 {
1955         struct port_info *pi = netdev_priv(dev);
1956         struct adapter *adapter = pi->adapter;
1957         struct qset_params *q;
1958         int i;
1959
1960         if (e->rx_pending > MAX_RX_BUFFERS ||
1961             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1962             e->tx_pending > MAX_TXQ_ENTRIES ||
1963             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1964             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1965             e->rx_pending < MIN_FL_ENTRIES ||
1966             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1967             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1968                 return -EINVAL;
1969
1970         if (adapter->flags & FULL_INIT_DONE)
1971                 return -EBUSY;
1972
1973         q = &adapter->params.sge.qset[pi->first_qset];
1974         for (i = 0; i < pi->nqsets; ++i, ++q) {
1975                 q->rspq_size = e->rx_mini_pending;
1976                 q->fl_size = e->rx_pending;
1977                 q->jumbo_size = e->rx_jumbo_pending;
1978                 q->txq_size[0] = e->tx_pending;
1979                 q->txq_size[1] = e->tx_pending;
1980                 q->txq_size[2] = e->tx_pending;
1981         }
1982         return 0;
1983 }
1984
1985 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1986 {
1987         struct port_info *pi = netdev_priv(dev);
1988         struct adapter *adapter = pi->adapter;
1989         struct qset_params *qsp;
1990         struct sge_qset *qs;
1991         int i;
1992
1993         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1994                 return -EINVAL;
1995
1996         for (i = 0; i < pi->nqsets; i++) {
1997                 qsp = &adapter->params.sge.qset[i];
1998                 qs = &adapter->sge.qs[i];
1999                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2000                 t3_update_qset_coalesce(qs, qsp);
2001         }
2002
2003         return 0;
2004 }
2005
2006 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2007 {
2008         struct port_info *pi = netdev_priv(dev);
2009         struct adapter *adapter = pi->adapter;
2010         struct qset_params *q = adapter->params.sge.qset;
2011
2012         c->rx_coalesce_usecs = q->coalesce_usecs;
2013         return 0;
2014 }
2015
2016 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2017                       u8 *data)
2018 {
2019         struct port_info *pi = netdev_priv(dev);
2020         struct adapter *adapter = pi->adapter;
2021         int i, err = 0;
2022
2023         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2024         if (!buf)
2025                 return -ENOMEM;
2026
2027         e->magic = EEPROM_MAGIC;
2028         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2029                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2030
2031         if (!err)
2032                 memcpy(data, buf + e->offset, e->len);
2033         kfree(buf);
2034         return err;
2035 }
2036
2037 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2038                       u8 *data)
2039 {
2040         struct port_info *pi = netdev_priv(dev);
2041         struct adapter *adapter = pi->adapter;
2042         u32 aligned_offset, aligned_len;
2043         __le32 *p;
2044         u8 *buf;
2045         int err;
2046
2047         if (eeprom->magic != EEPROM_MAGIC)
2048                 return -EINVAL;
2049
2050         aligned_offset = eeprom->offset & ~3;
2051         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2052
2053         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2054                 buf = kmalloc(aligned_len, GFP_KERNEL);
2055                 if (!buf)
2056                         return -ENOMEM;
2057                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
2058                 if (!err && aligned_len > 4)
2059                         err = t3_seeprom_read(adapter,
2060                                               aligned_offset + aligned_len - 4,
2061                                               (__le32 *)&buf[aligned_len - 4]);
2062                 if (err)
2063                         goto out;
2064                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2065         } else
2066                 buf = data;
2067
2068         err = t3_seeprom_wp(adapter, 0);
2069         if (err)
2070                 goto out;
2071
2072         for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2073                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2074                 aligned_offset += 4;
2075         }
2076
2077         if (!err)
2078                 err = t3_seeprom_wp(adapter, 1);
2079 out:
2080         if (buf != data)
2081                 kfree(buf);
2082         return err;
2083 }
2084
2085 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2086 {
2087         wol->supported = 0;
2088         wol->wolopts = 0;
2089         memset(&wol->sopass, 0, sizeof(wol->sopass));
2090 }
2091
2092 static const struct ethtool_ops cxgb_ethtool_ops = {
2093         .get_settings = get_settings,
2094         .set_settings = set_settings,
2095         .get_drvinfo = get_drvinfo,
2096         .get_msglevel = get_msglevel,
2097         .set_msglevel = set_msglevel,
2098         .get_ringparam = get_sge_param,
2099         .set_ringparam = set_sge_param,
2100         .get_coalesce = get_coalesce,
2101         .set_coalesce = set_coalesce,
2102         .get_eeprom_len = get_eeprom_len,
2103         .get_eeprom = get_eeprom,
2104         .set_eeprom = set_eeprom,
2105         .get_pauseparam = get_pauseparam,
2106         .set_pauseparam = set_pauseparam,
2107         .get_link = ethtool_op_get_link,
2108         .get_strings = get_strings,
2109         .set_phys_id = set_phys_id,
2110         .nway_reset = restart_autoneg,
2111         .get_sset_count = get_sset_count,
2112         .get_ethtool_stats = get_stats,
2113         .get_regs_len = get_regs_len,
2114         .get_regs = get_regs,
2115         .get_wol = get_wol,
2116 };
2117
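/*
 * Range-check an ioctl argument.  Negative values mean "leave
 * unchanged" and always pass.
 */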
2118 static int in_range(int val, int lo, int hi)
2119 {
2120         return val < 0 || (val <= hi && val >= lo);
2121 }
2122
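/*
 * Handler for the SIOCCHIOCTL private ioctl: decode and execute the
 * CHELSIO_* extension commands issued by userspace management tools.
 */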
2123 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2124 {
2125         struct port_info *pi = netdev_priv(dev);
2126         struct adapter *adapter = pi->adapter;
2127         u32 cmd;
2128         int ret;
2129
2130         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2131                 return -EFAULT;
2132
2133         switch (cmd) {
2134         case CHELSIO_SET_QSET_PARAMS:{
2135                 int i;
2136                 struct qset_params *q;
2137                 struct ch_qset_params t;
2138                 int q1 = pi->first_qset;
2139                 int nqsets = pi->nqsets;
2140
2141                 if (!capable(CAP_NET_ADMIN))
2142                         return -EPERM;
2143                 if (copy_from_user(&t, useraddr, sizeof(t)))
2144                         return -EFAULT;
2145                 if (t.qset_idx >= SGE_QSETS)
2146                         return -EINVAL;
2147                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2148                     !in_range(t.cong_thres, 0, 255) ||
2149                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2150                               MAX_TXQ_ENTRIES) ||
2151                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2152                               MAX_TXQ_ENTRIES) ||
2153                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2154                               MAX_CTRL_TXQ_ENTRIES) ||
2155                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2156                               MAX_RX_BUFFERS) ||
2157                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2158                               MAX_RX_JUMBO_BUFFERS) ||
2159                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2160                               MAX_RSPQ_ENTRIES))
2161                         return -EINVAL;
2162
2163                 if ((adapter->flags & FULL_INIT_DONE) &&
2164                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2165                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2166                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2167                         t.polling >= 0 || t.cong_thres >= 0))
2168                         return -EBUSY;
2169
2170                 /* Allow setting of any available qset when offload enabled */
2171                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2172                         q1 = 0;
2173                         for_each_port(adapter, i) {
2174                                 pi = adap2pinfo(adapter, i);
2175                                 nqsets += pi->first_qset + pi->nqsets;
2176                         }
2177                 }
2178
2179                 if (t.qset_idx < q1)
2180                         return -EINVAL;
2181                 if (t.qset_idx > q1 + nqsets - 1)
2182                         return -EINVAL;
2183
2184                 q = &adapter->params.sge.qset[t.qset_idx];
2185
2186                 if (t.rspq_size >= 0)
2187                         q->rspq_size = t.rspq_size;
2188                 if (t.fl_size[0] >= 0)
2189                         q->fl_size = t.fl_size[0];
2190                 if (t.fl_size[1] >= 0)
2191                         q->jumbo_size = t.fl_size[1];
2192                 if (t.txq_size[0] >= 0)
2193                         q->txq_size[0] = t.txq_size[0];
2194                 if (t.txq_size[1] >= 0)
2195                         q->txq_size[1] = t.txq_size[1];
2196                 if (t.txq_size[2] >= 0)
2197                         q->txq_size[2] = t.txq_size[2];
2198                 if (t.cong_thres >= 0)
2199                         q->cong_thres = t.cong_thres;
2200                 if (t.intr_lat >= 0) {
2201                         struct sge_qset *qs =
2202                                 &adapter->sge.qs[t.qset_idx];
2203
2204                         q->coalesce_usecs = t.intr_lat;
2205                         t3_update_qset_coalesce(qs, q);
2206                 }
2207                 if (t.polling >= 0) {
2208                         if (adapter->flags & USING_MSIX)
2209                                 q->polling = t.polling;
2210                         else {
2211                                 /* No polling with INTx for T3A */
2212                                 if (adapter->params.rev == 0 &&
2213                                         !(adapter->flags & USING_MSI))
2214                                         t.polling = 0;
2215
2216                                 for (i = 0; i < SGE_QSETS; i++) {
2217                                         q = &adapter->params.sge.
2218                                                 qset[i];
2219                                         q->polling = t.polling;
2220                                 }
2221                         }
2222                 }
2223
2224                 if (t.lro >= 0) {
2225                         if (t.lro)
2226                                 dev->wanted_features |= NETIF_F_GRO;
2227                         else
2228                                 dev->wanted_features &= ~NETIF_F_GRO;
2229                         netdev_update_features(dev);
2230                 }
2231
2232                 break;
2233         }
2234         case CHELSIO_GET_QSET_PARAMS:{
2235                 struct qset_params *q;
2236                 struct ch_qset_params t;
2237                 int q1 = pi->first_qset;
2238                 int nqsets = pi->nqsets;
2239                 int i;
2240
2241                 if (copy_from_user(&t, useraddr, sizeof(t)))
2242                         return -EFAULT;
2243
2244                 /* Display qsets for all ports when offload enabled */
2245                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2246                         q1 = 0;
2247                         for_each_port(adapter, i) {
2248                                 pi = adap2pinfo(adapter, i);
2249                                 nqsets = pi->first_qset + pi->nqsets;
2250                         }
2251                 }
2252
2253                 if (t.qset_idx >= nqsets)
2254                         return -EINVAL;
2255
2256                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2257                 t.rspq_size = q->rspq_size;
2258                 t.txq_size[0] = q->txq_size[0];
2259                 t.txq_size[1] = q->txq_size[1];
2260                 t.txq_size[2] = q->txq_size[2];
2261                 t.fl_size[0] = q->fl_size;
2262                 t.fl_size[1] = q->jumbo_size;
2263                 t.polling = q->polling;
2264                 t.lro = !!(dev->features & NETIF_F_GRO);
2265                 t.intr_lat = q->coalesce_usecs;
2266                 t.cong_thres = q->cong_thres;
2267                 t.qnum = q1;
2268
2269                 if (adapter->flags & USING_MSIX)
2270                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2271                 else
2272                         t.vector = adapter->pdev->irq;
2273
2274                 if (copy_to_user(useraddr, &t, sizeof(t)))
2275                         return -EFAULT;
2276                 break;
2277         }
2278         case CHELSIO_SET_QSET_NUM:{
2279                 struct ch_reg edata;
2280                 unsigned int i, first_qset = 0, other_qsets = 0;
2281
2282                 if (!capable(CAP_NET_ADMIN))
2283                         return -EPERM;
2284                 if (adapter->flags & FULL_INIT_DONE)
2285                         return -EBUSY;
2286                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2287                         return -EFAULT;
2288                 if (edata.val < 1 ||
2289                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2290                         return -EINVAL;
2291
2292                 for_each_port(adapter, i)
2293                         if (adapter->port[i] && adapter->port[i] != dev)
2294                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2295
2296                 if (edata.val + other_qsets > SGE_QSETS)
2297                         return -EINVAL;
2298
2299                 pi->nqsets = edata.val;
2300
2301                 for_each_port(adapter, i)
2302                         if (adapter->port[i]) {
2303                                 pi = adap2pinfo(adapter, i);
2304                                 pi->first_qset = first_qset;
2305                                 first_qset += pi->nqsets;
2306                         }
2307                 break;
2308         }
2309         case CHELSIO_GET_QSET_NUM:{
2310                 struct ch_reg edata;
2311
2312                 memset(&edata, 0, sizeof(struct ch_reg));
2313
2314                 edata.cmd = CHELSIO_GET_QSET_NUM;
2315                 edata.val = pi->nqsets;
2316                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2317                         return -EFAULT;
2318                 break;
2319         }
2320         case CHELSIO_LOAD_FW:{
2321                 u8 *fw_data;
2322                 struct ch_mem_range t;
2323
2324                 if (!capable(CAP_SYS_RAWIO))
2325                         return -EPERM;
2326                 if (copy_from_user(&t, useraddr, sizeof(t)))
2327                         return -EFAULT;
2328                 /* Should we sanity-check t.len? */
2329                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2330                 if (IS_ERR(fw_data))
2331                         return PTR_ERR(fw_data);
2332
2333                 ret = t3_load_fw(adapter, fw_data, t.len);
2334                 kfree(fw_data);
2335                 if (ret)
2336                         return ret;
2337                 break;
2338         }
2339         case CHELSIO_SETMTUTAB:{
2340                 struct ch_mtus m;
2341                 int i;
2342
2343                 if (!is_offload(adapter))
2344                         return -EOPNOTSUPP;
2345                 if (!capable(CAP_NET_ADMIN))
2346                         return -EPERM;
2347                 if (offload_running(adapter))
2348                         return -EBUSY;
2349                 if (copy_from_user(&m, useraddr, sizeof(m)))
2350                         return -EFAULT;
2351                 if (m.nmtus != NMTUS)
2352                         return -EINVAL;
2353                 if (m.mtus[0] < 81)     /* accommodate SACK */
2354                         return -EINVAL;
2355
2356                 /* MTUs must be in ascending order */
2357                 for (i = 1; i < NMTUS; ++i)
2358                         if (m.mtus[i] < m.mtus[i - 1])
2359                                 return -EINVAL;
2360
2361                 memcpy(adapter->params.mtus, m.mtus,
2362                         sizeof(adapter->params.mtus));
2363                 break;
2364         }
2365         case CHELSIO_GET_PM:{
2366                 struct tp_params *p = &adapter->params.tp;
2367                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2368
2369                 if (!is_offload(adapter))
2370                         return -EOPNOTSUPP;
2371                 m.tx_pg_sz = p->tx_pg_size;
2372                 m.tx_num_pg = p->tx_num_pgs;
2373                 m.rx_pg_sz = p->rx_pg_size;
2374                 m.rx_num_pg = p->rx_num_pgs;
2375                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2376                 if (copy_to_user(useraddr, &m, sizeof(m)))
2377                         return -EFAULT;
2378                 break;
2379         }
2380         case CHELSIO_SET_PM:{
2381                 struct ch_pm m;
2382                 struct tp_params *p = &adapter->params.tp;
2383
2384                 if (!is_offload(adapter))
2385                         return -EOPNOTSUPP;
2386                 if (!capable(CAP_NET_ADMIN))
2387                         return -EPERM;
2388                 if (adapter->flags & FULL_INIT_DONE)
2389                         return -EBUSY;
2390                 if (copy_from_user(&m, useraddr, sizeof(m)))
2391                         return -EFAULT;
2392                 if (!is_power_of_2(m.rx_pg_sz) ||
2393                         !is_power_of_2(m.tx_pg_sz))
2394                         return -EINVAL; /* not power of 2 */
2395                 if (!(m.rx_pg_sz & 0x14000))
2396                         return -EINVAL; /* not 16KB or 64KB */
2397                 if (!(m.tx_pg_sz & 0x1554000))
2398                         return -EINVAL; /* not a power of 4 between 16KB and 16MB */
2399                 if (m.tx_num_pg == -1)
2400                         m.tx_num_pg = p->tx_num_pgs;
2401                 if (m.rx_num_pg == -1)
2402                         m.rx_num_pg = p->rx_num_pgs;
2403                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2404                         return -EINVAL;
2405                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2406                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2407                         return -EINVAL;
2408                 p->rx_pg_size = m.rx_pg_sz;
2409                 p->tx_pg_size = m.tx_pg_sz;
2410                 p->rx_num_pgs = m.rx_num_pg;
2411                 p->tx_num_pgs = m.tx_num_pg;
2412                 break;
2413         }
2414         case CHELSIO_GET_MEM:{
2415                 struct ch_mem_range t;
2416                 struct mc7 *mem;
2417                 u64 buf[32];
2418
2419                 if (!is_offload(adapter))
2420                         return -EOPNOTSUPP;
2421                 if (!(adapter->flags & FULL_INIT_DONE))
2422                         return -EIO;    /* need the memory controllers */
2423                 if (copy_from_user(&t, useraddr, sizeof(t)))
2424                         return -EFAULT;
2425                 if ((t.addr & 7) || (t.len & 7))
2426                         return -EINVAL;
2427                 if (t.mem_id == MEM_CM)
2428                         mem = &adapter->cm;
2429                 else if (t.mem_id == MEM_PMRX)
2430                         mem = &adapter->pmrx;
2431                 else if (t.mem_id == MEM_PMTX)
2432                         mem = &adapter->pmtx;
2433                 else
2434                         return -EINVAL;
2435
2436                 /*
2437                  * Version scheme:
2438                  * bits 0..9: chip version
2439                  * bits 10..15: chip revision
2440                  */
2441                 t.version = 3 | (adapter->params.rev << 10);
2442                 if (copy_to_user(useraddr, &t, sizeof(t)))
2443                         return -EFAULT;
2444
2445                 /*
2446                  * Read 256 bytes at a time as len can be large and we don't
2447                  * want to use huge intermediate buffers.
2448                  */
2449                 useraddr += sizeof(t);  /* advance to start of buffer */
2450                 while (t.len) {
2451                         unsigned int chunk = min_t(unsigned int,
2452                                                    t.len, sizeof(buf));
2453
2454                         ret = t3_mc7_bd_read(mem, t.addr / 8,
2455                                              chunk / 8,
2456                                              buf);
2457                         if (ret)
2458                                 return ret;
2459                         if (copy_to_user(useraddr, buf, chunk))
2460                                 return -EFAULT;
2461                         useraddr += chunk;
2462                         t.addr += chunk;
2463                         t.len -= chunk;
2464                 }
2465                 break;
2466         }
2467         case CHELSIO_SET_TRACE_FILTER:{
2468                 struct ch_trace t;
2469                 const struct trace_params *tp;
2470
2471                 if (!capable(CAP_NET_ADMIN))
2472                         return -EPERM;
2473                 if (!offload_running(adapter))
2474                         return -EAGAIN;
2475                 if (copy_from_user(&t, useraddr, sizeof(t)))
2476                         return -EFAULT;
2477
2478                 tp = (const struct trace_params *)&t.sip;
2479                 if (t.config_tx)
2480                         t3_config_trace_filter(adapter, tp, 0,
2481                                                 t.invert_match,
2482                                                 t.trace_tx);
2483                 if (t.config_rx)
2484                         t3_config_trace_filter(adapter, tp, 1,
2485                                                 t.invert_match,
2486                                                 t.trace_rx);
2487                 break;
2488         }
2489         default:
2490                 return -EOPNOTSUPP;
2491         }
2492         return 0;
2493 }
2494
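/* ndo_do_ioctl handler: MII access plus the Chelsio private ioctl. */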
2495 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2496 {
2497         struct mii_ioctl_data *data = if_mii(req);
2498         struct port_info *pi = netdev_priv(dev);
2499         struct adapter *adapter = pi->adapter;
2500
2501         switch (cmd) {
2502         case SIOCGMIIREG:
2503         case SIOCSMIIREG:
2504                 /* Convert phy_id from older PRTAD/DEVAD format */
2505                 if (is_10G(adapter) &&
2506                     !mdio_phy_id_is_c45(data->phy_id) &&
2507                     (data->phy_id & 0x1f00) &&
2508                     !(data->phy_id & 0xe0e0))
2509                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2510                                                        data->phy_id & 0x1f);
2511                 /* FALLTHRU */
2512         case SIOCGMIIPHY:
2513                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2514         case SIOCCHIOCTL:
2515                 return cxgb_extension_ioctl(dev, req->ifr_data);
2516         default:
2517                 return -EOPNOTSUPP;
2518         }
2519 }
2520
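/*
 * ndo_change_mtu handler: program the MAC for the new MTU, refresh the
 * per-port MTU state, and on rev-0 parts reload the HW MTU table when
 * offload is running.
 */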
2521 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2522 {
2523         struct port_info *pi = netdev_priv(dev);
2524         struct adapter *adapter = pi->adapter;
2525         int ret;
2526
2527         if (new_mtu < 81)       /* accommodate SACK */
2528                 return -EINVAL;
2529         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2530                 return ret;
2531         dev->mtu = new_mtu;
2532         init_port_mtus(adapter);
2533         if (adapter->params.rev == 0 && offload_running(adapter))
2534                 t3_load_mtus(adapter, adapter->params.mtus,
2535                              adapter->params.a_wnd, adapter->params.b_wnd,
2536                              adapter->port[0]->mtu);
2537         return 0;
2538 }
2539
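/*
 * ndo_set_mac_address handler: write the new address to the MAC and,
 * when offload is running, update the port's SMT entry to match.
 */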
2540 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2541 {
2542         struct port_info *pi = netdev_priv(dev);
2543         struct adapter *adapter = pi->adapter;
2544         struct sockaddr *addr = p;
2545
2546         if (!is_valid_ether_addr(addr->sa_data))
2547                 return -EADDRNOTAVAIL;
2548
2549         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2550         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2551         if (offload_running(adapter))
2552                 write_smt_entry(adapter, pi->port_id);
2553         return 0;
2554 }
2555
2556 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2557         netdev_features_t features)
2558 {
2559         /*
2560          * Since there is no support for separate rx/tx vlan accel
2561          * enable/disable make sure tx flag is always in same state as rx.
2562          */
2563         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2564                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2565         else
2566                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2567
2568         return features;
2569 }
2570
2571 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2572 {
2573         netdev_features_t changed = dev->features ^ features;
2574
2575         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2576                 cxgb_vlan_mode(dev, features);
2577
2578         return 0;
2579 }
2580
2581 #ifdef CONFIG_NET_POLL_CONTROLLER
2582 static void cxgb_netpoll(struct net_device *dev)
2583 {
2584         struct port_info *pi = netdev_priv(dev);
2585         struct adapter *adapter = pi->adapter;
2586         int qidx;
2587
2588         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2589                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2590                 void *source;
2591
2592                 if (adapter->flags & USING_MSIX)
2593                         source = qs;
2594                 else
2595                         source = adapter;
2596
2597                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2598         }
2599 }
2600 #endif
2601
2602 /*
2603  * Periodic accumulation of MAC statistics.
2604  */
2605 static void mac_stats_update(struct adapter *adapter)
2606 {
2607         int i;
2608
2609         for_each_port(adapter, i) {
2610                 struct net_device *dev = adapter->port[i];
2611                 struct port_info *p = netdev_priv(dev);
2612
2613                 if (netif_running(dev)) {
2614                         spin_lock(&adapter->stats_lock);
2615                         t3_mac_update_stats(&p->mac);
2616                         spin_unlock(&adapter->stats_lock);
2617                 }
2618         }
2619 }
2620
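/*
 * Poll the link state of every port: service any latched link fault
 * first, then sample the link by hand on PHYs without interrupt
 * support.
 */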
2621 static void check_link_status(struct adapter *adapter)
2622 {
2623         int i;
2624
2625         for_each_port(adapter, i) {
2626                 struct net_device *dev = adapter->port[i];
2627                 struct port_info *p = netdev_priv(dev);
2628                 int link_fault;
2629
2630                 spin_lock_irq(&adapter->work_lock);
2631                 link_fault = p->link_fault;
2632                 spin_unlock_irq(&adapter->work_lock);
2633
2634                 if (link_fault) {
2635                         t3_link_fault(adapter, i);
2636                         continue;
2637                 }
2638
2639                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2640                         t3_xgm_intr_disable(adapter, i);
2641                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2642
2643                         t3_link_changed(adapter, i);
2644                         t3_xgm_intr_enable(adapter, i);
2645                 }
2646         }
2647 }
2648
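/*
 * T3B2 MAC watchdog.  t3b2_mac_watchdog_task() returns 1 when it had to
 * toggle TX enable and 2 when the MAC needs a full reset, in which case
 * we reprogram the MAC and restart the link, counting both events.
 */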
2649 static void check_t3b2_mac(struct adapter *adapter)
2650 {
2651         int i;
2652
2653         if (!rtnl_trylock())    /* synchronize with ifdown */
2654                 return;
2655
2656         for_each_port(adapter, i) {
2657                 struct net_device *dev = adapter->port[i];
2658                 struct port_info *p = netdev_priv(dev);
2659                 int status;
2660
2661                 if (!netif_running(dev))
2662                         continue;
2663
2664                 status = 0;
2665                 if (netif_carrier_ok(dev))
2666                         status = t3b2_mac_watchdog_task(&p->mac);
2667                 if (status == 1)
2668                         p->mac.stats.num_toggled++;
2669                 else if (status == 2) {
2670                         struct cmac *mac = &p->mac;
2671
2672                         t3_mac_set_mtu(mac, dev->mtu);
2673                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2674                         cxgb_set_rxmode(dev);
2675                         t3_link_start(&p->phy, mac, &p->link_config);
2676                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2677                         t3_port_intr_enable(adapter, p->port_id);
2678                         p->mac.stats.num_resets++;
2679                 }
2680         }
2681         rtnl_unlock();
2682 }
2683
2684
2685 static void t3_adap_check_task(struct work_struct *work)
2686 {
2687         struct adapter *adapter = container_of(work, struct adapter,
2688                                                adap_check_task.work);
2689         const struct adapter_params *p = &adapter->params;
2690         int port;
2691         unsigned int v, status, reset;
2692
2693         adapter->check_task_cnt++;
2694
2695         check_link_status(adapter);
2696
2697         /* Accumulate MAC stats if needed */
2698         if (!p->linkpoll_period ||
2699             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2700             p->stats_update_period) {
2701                 mac_stats_update(adapter);
2702                 adapter->check_task_cnt = 0;
2703         }
2704
2705         if (p->rev == T3_REV_B2)
2706                 check_t3b2_mac(adapter);
2707
2708         /*
2709          * Scan the XGMACs to check for various conditions which we want to
2710          * monitor in a periodic polling manner rather than via an interrupt
2711          * condition.  This is used for conditions which would otherwise flood
2712          * the system with interrupts and we only really need to know that the
2713          * conditions are "happening" ...  For each condition we count the
2714          * detection of the condition and reset it for the next polling loop.
2715          */
2716         for_each_port(adapter, port) {
2717                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2718                 u32 cause;
2719
2720                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2721                 reset = 0;
2722                 if (cause & F_RXFIFO_OVERFLOW) {
2723                         mac->stats.rx_fifo_ovfl++;
2724                         reset |= F_RXFIFO_OVERFLOW;
2725                 }
2726
2727                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2728         }
2729
2730         /*
2731          * We do the same as above for FL_EMPTY interrupts.
2732          */
2733         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2734         reset = 0;
2735
2736         if (status & F_FLEMPTY) {
2737                 struct sge_qset *qs = &adapter->sge.qs[0];
2738                 int i = 0;
2739
2740                 reset |= F_FLEMPTY;
2741
2742                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2743                     0xffff;
2744
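                /*
                 * Successive status bits alternate between the two free
                 * lists of each qset: fl0 then fl1 of qset 0, qset 1, ...
                 */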
2745                 while (v) {
2746                         qs->fl[i].empty += (v & 1);
2747                         if (i)
2748                                 qs++;
2749                         i ^= 1;
2750                         v >>= 1;
2751                 }
2752         }
2753
2754         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2755
2756         /* Schedule the next check update if any port is active. */
2757         spin_lock_irq(&adapter->work_lock);
2758         if (adapter->open_device_map & PORT_MASK)
2759                 schedule_chk_task(adapter);
2760         spin_unlock_irq(&adapter->work_lock);
2761 }
2762
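/*
 * Doorbell state notifications from the SGE, run in process context:
 * forward FIFO full/empty/drop events to the offload driver.
 */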
2763 static void db_full_task(struct work_struct *work)
2764 {
2765         struct adapter *adapter = container_of(work, struct adapter,
2766                                                db_full_task);
2767
2768         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2769 }
2770
2771 static void db_empty_task(struct work_struct *work)
2772 {
2773         struct adapter *adapter = container_of(work, struct adapter,
2774                                                db_empty_task);
2775
2776         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2777 }
2778
2779 static void db_drop_task(struct work_struct *work)
2780 {
2781         struct adapter *adapter = container_of(work, struct adapter,
2782                                                db_drop_task);
2783         unsigned long delay = 1000;
2784         unsigned short r;
2785
2786         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2787
2788         /*
2789          * Sleep a while before ringing the driver qset dbs.
2790          * The delay is between 1000 and 2023 usecs.
2791          */
2792         get_random_bytes(&r, 2);
2793         delay += r & 1023;
2794         set_current_state(TASK_UNINTERRUPTIBLE);
2795         schedule_timeout(usecs_to_jiffies(delay));
2796         ring_dbs(adapter);
2797 }
2798
2799 /*
2800  * Processes external (PHY) interrupts in process context.
2801  */
2802 static void ext_intr_task(struct work_struct *work)
2803 {
2804         struct adapter *adapter = container_of(work, struct adapter,
2805                                                ext_intr_handler_task);
2806         int i;
2807
2808         /* Disable link fault interrupts */
2809         for_each_port(adapter, i) {
2810                 struct net_device *dev = adapter->port[i];
2811                 struct port_info *p = netdev_priv(dev);
2812
2813                 t3_xgm_intr_disable(adapter, i);
2814                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2815         }
2816
2817         /* Re-enable link fault interrupts */
2818         t3_phy_intr_handler(adapter);
2819
2820         for_each_port(adapter, i)
2821                 t3_xgm_intr_enable(adapter, i);
2822
2823         /* Now reenable external interrupts */
2824         spin_lock_irq(&adapter->work_lock);
2825         if (adapter->slow_intr_mask) {
2826                 adapter->slow_intr_mask |= F_T3DBG;
2827                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2828                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2829                              adapter->slow_intr_mask);
2830         }
2831         spin_unlock_irq(&adapter->work_lock);
2832 }
2833
2834 /*
2835  * Interrupt-context handler for external (PHY) interrupts.
2836  */
2837 void t3_os_ext_intr_handler(struct adapter *adapter)
2838 {
2839         /*
2840          * Schedule a task to handle external interrupts as they may be slow
2841          * and we use a mutex to protect MDIO registers.  We disable PHY
2842          * interrupts in the meantime and let the task reenable them when
2843          * it's done.
2844          */
2845         spin_lock(&adapter->work_lock);
2846         if (adapter->slow_intr_mask) {
2847                 adapter->slow_intr_mask &= ~F_T3DBG;
2848                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2849                              adapter->slow_intr_mask);
2850                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2851         }
2852         spin_unlock(&adapter->work_lock);
2853 }
2854
2855 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2856 {
2857         struct net_device *netdev = adapter->port[port_id];
2858         struct port_info *pi = netdev_priv(netdev);
2859
2860         spin_lock(&adapter->work_lock);
2861         pi->link_fault = 1;
2862         spin_unlock(&adapter->work_lock);
2863 }
2864
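/*
 * Quiesce the adapter after a fatal error or PCI channel failure: shut
 * down offload, close all running ports, stop the SGE timers,
 * optionally reset the chip, and disable the PCI device.
 */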
2865 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2866 {
2867         int i, ret = 0;
2868
2869         if (is_offload(adapter) &&
2870             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2871                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2872                 offload_close(&adapter->tdev);
2873         }
2874
2875         /* Stop all ports */
2876         for_each_port(adapter, i) {
2877                 struct net_device *netdev = adapter->port[i];
2878
2879                 if (netif_running(netdev))
2880                         __cxgb_close(netdev, on_wq);
2881         }
2882
2883         /* Stop SGE timers */
2884         t3_stop_sge_timers(adapter);
2885
2886         adapter->flags &= ~FULL_INIT_DONE;
2887
2888         if (reset)
2889                 ret = t3_reset_adapter(adapter);
2890
2891         pci_disable_device(adapter->pdev);
2892
2893         return ret;
2894 }
2895
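/*
 * Re-enable the PCI device after a reset, release the old SGE
 * resources, and prepare the adapter for a replay of its
 * initialization sequence.
 */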
2896 static int t3_reenable_adapter(struct adapter *adapter)
2897 {
2898         if (pci_enable_device(adapter->pdev)) {
2899                 dev_err(&adapter->pdev->dev,
2900                         "Cannot re-enable PCI device after reset.\n");
2901                 goto err;
2902         }
2903         pci_set_master(adapter->pdev);
2904         pci_restore_state(adapter->pdev);
2905         pci_save_state(adapter->pdev);
2906
2907         /* Free sge resources */
2908         t3_free_sge_resources(adapter);
2909
2910         if (t3_replay_prep_adapter(adapter))
2911                 goto err;
2912
2913         return 0;
2914 err:
2915         return -1;
2916 }
2917
2918 static void t3_resume_ports(struct adapter *adapter)
2919 {
2920         int i;
2921
2922         /* Restart the ports */
2923         for_each_port(adapter, i) {
2924                 struct net_device *netdev = adapter->port[i];
2925
2926                 if (netif_running(netdev)) {
2927                         if (cxgb_open(netdev)) {
2928                                 dev_err(&adapter->pdev->dev,
2929                                         "can't bring device back up"
2930                                         " after reset\n");
2931                                 continue;
2932                         }
2933                 }
2934         }
2935
2936         if (is_offload(adapter) && !ofld_disable)
2937                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2938 }
2939
2940 /*
2941  * Processes a fatal error:
2942  * bring the ports down, reset the chip, bring the ports back up.
2943  */
2944 static void fatal_error_task(struct work_struct *work)
2945 {
2946         struct adapter *adapter = container_of(work, struct adapter,
2947                                                fatal_error_handler_task);
2948         int err = 0;
2949
2950         rtnl_lock();
2951         err = t3_adapter_error(adapter, 1, 1);
2952         if (!err)
2953                 err = t3_reenable_adapter(adapter);
2954         if (!err)
2955                 t3_resume_ports(adapter);
2956
2957         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2958         rtnl_unlock();
2959 }
2960
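/*
 * Handle a fatal HW error: freeze the SGE and both MACs, disable
 * interrupts, and schedule fatal_error_task to reset and recover the
 * adapter.
 */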
2961 void t3_fatal_err(struct adapter *adapter)
2962 {
2963         unsigned int fw_status[4];
2964
2965         if (adapter->flags & FULL_INIT_DONE) {
2966                 t3_sge_stop(adapter);
2967                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2968                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2969                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2970                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2971
2972                 spin_lock(&adapter->work_lock);
2973                 t3_intr_disable(adapter);
2974                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2975                 spin_unlock(&adapter->work_lock);
2976         }
2977         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2978         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2979                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2980                          fw_status[0], fw_status[1],
2981                          fw_status[2], fw_status[3]);
2982 }
2983
2984 /**
2985  * t3_io_error_detected - called when a PCI error is detected
2986  * @pdev: Pointer to PCI device
2987  * @state: The current PCI connection state
2988  *
2989  * This function is called after a PCI bus error affecting
2990  * this device has been detected.
2991  */
2992 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2993                                              pci_channel_state_t state)
2994 {
2995         struct adapter *adapter = pci_get_drvdata(pdev);
2996
2997         if (state == pci_channel_io_perm_failure)
2998                 return PCI_ERS_RESULT_DISCONNECT;
2999
3000         t3_adapter_error(adapter, 0, 0);
3001
3002         /* Request a slot reset. */
3003         return PCI_ERS_RESULT_NEED_RESET;
3004 }
3005
3006 /**
3007  * t3_io_slot_reset - called after the PCI bus has been reset.
3008  * @pdev: Pointer to PCI device
3009  *
3010  * Restart the card from scratch, as if from a cold boot.
3011  */
3012 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3013 {
3014         struct adapter *adapter = pci_get_drvdata(pdev);
3015
3016         if (!t3_reenable_adapter(adapter))
3017                 return PCI_ERS_RESULT_RECOVERED;
3018
3019         return PCI_ERS_RESULT_DISCONNECT;
3020 }
3021
3022 /**
3023  * t3_io_resume - called when traffic can start flowing again.
3024  * @pdev: Pointer to PCI device
3025  *
3026  * This callback is called when the error recovery driver tells us that
3027  * it's OK to resume normal operation.
3028  */
3029 static void t3_io_resume(struct pci_dev *pdev)
3030 {
3031         struct adapter *adapter = pci_get_drvdata(pdev);
3032
3033         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3034                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3035
3036         rtnl_lock();
3037         t3_resume_ports(adapter);
3038         rtnl_unlock();
3039 }
3040
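/*
 * PCI error recovery callbacks.  The PCI error recovery core calls
 * .error_detected first; returning PCI_ERS_RESULT_NEED_RESET requests a
 * slot reset, after which .slot_reset runs and, if it reports success,
 * .resume is called once traffic may flow again.
 */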
3041 static const struct pci_error_handlers t3_err_handler = {
3042         .error_detected = t3_io_error_detected,
3043         .slot_reset = t3_io_slot_reset,
3044         .resume = t3_io_resume,
3045 };
3046
3047 /*
3048  * Set the number of qsets based on the number of CPUs and the number of ports,
3049  * not to exceed the number of available qsets, assuming there are enough qsets
3050  * per port in HW.
3051  */
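/*
 * For example, assuming the usual SGE_QSETS limit of 8: a two-port
 * adapter that obtained the full 9 MSI-X vectors starts with 8 qsets,
 * halves that to 4 per port, and is then capped by the CPU count, while
 * a four-port adapter always gets one qset per port.
 */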
3052 static void set_nqsets(struct adapter *adap)
3053 {
3054         int i, j = 0;
3055         int num_cpus = netif_get_num_default_rss_queues();
3056         int hwports = adap->params.nports;
3057         int nqsets = adap->msix_nvectors - 1;
3058
3059         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3060                 if (hwports == 2 &&
3061                     (hwports * nqsets > SGE_QSETS ||
3062                      num_cpus >= nqsets / hwports))
3063                         nqsets /= hwports;
3064                 if (nqsets > num_cpus)
3065                         nqsets = num_cpus;
3066                 if (nqsets < 1 || hwports == 4)
3067                         nqsets = 1;
3068         } else {
3069                 nqsets = 1;
3070         }
3071         for_each_port(adap, i) {
3072                 struct port_info *pi = adap2pinfo(adap, i);
3073
3074                 pi->first_qset = j;
3075                 pi->nqsets = nqsets;
3076                 j = pi->first_qset + nqsets;
3077
3078                 dev_info(&adap->pdev->dev,
3079                          "Port %d using %d queue sets.\n", i, nqsets);
3080         }
3081 }
3082
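/*
 * Enable MSI-X: request one vector per queue set plus one for
 * adapter-wide events, accept as few as nports + 1, and record the
 * vectors we were actually granted.
 */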
3083 static int cxgb_enable_msix(struct adapter *adap)
3084 {
3085         struct msix_entry entries[SGE_QSETS + 1];
3086         int vectors;
3087         int i;
3088
3089         vectors = ARRAY_SIZE(entries);
3090         for (i = 0; i < vectors; ++i)
3091                 entries[i].entry = i;
3092
3093         vectors = pci_enable_msix_range(adap->pdev, entries,
3094                                         adap->params.nports + 1, vectors);
3095         if (vectors < 0)
3096                 return vectors;
3097
3098         for (i = 0; i < vectors; ++i)
3099                 adap->msix_info[i].vec = entries[i].vector;
3100         adap->msix_nvectors = vectors;
3101
3102         return 0;
3103 }
3104
3105 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3106 {
3107         static const char *pci_variant[] = {
3108                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3109         };
3110
3111         int i;
3112         char buf[80];
3113
3114         if (is_pcie(adap))
3115                 snprintf(buf, sizeof(buf), "%s x%d",
3116                          pci_variant[adap->params.pci.variant],
3117                          adap->params.pci.width);
3118         else
3119                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3120                          pci_variant[adap->params.pci.variant],
3121                          adap->params.pci.speed, adap->params.pci.width);
3122
3123         for_each_port(adap, i) {
3124                 struct net_device *dev = adap->port[i];
3125                 const struct port_info *pi = netdev_priv(dev);
3126
3127                 if (!test_bit(i, &adap->registered_device_map))
3128                         continue;
3129                 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3130                             ai->desc, pi->phy.desc,
3131                             is_offload(adap) ? "R" : "", adap->params.rev, buf,
3132                             (adap->flags & USING_MSIX) ? " MSI-X" :
3133                             (adap->flags & USING_MSI) ? " MSI" : "");
3134                 if (adap->name == dev->name && adap->params.vpd.mclk)
3135                         pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3136                                adap->name, t3_mc7_size(&adap->cm) >> 20,
3137                                t3_mc7_size(&adap->pmtx) >> 20,
3138                                t3_mc7_size(&adap->pmrx) >> 20,
3139                                adap->params.vpd.sn);
3140         }
3141 }
3142
3143 static const struct net_device_ops cxgb_netdev_ops = {
3144         .ndo_open               = cxgb_open,
3145         .ndo_stop               = cxgb_close,
3146         .ndo_start_xmit         = t3_eth_xmit,
3147         .ndo_get_stats          = cxgb_get_stats,
3148         .ndo_validate_addr      = eth_validate_addr,
3149         .ndo_set_rx_mode        = cxgb_set_rxmode,
3150         .ndo_do_ioctl           = cxgb_ioctl,
3151         .ndo_change_mtu         = cxgb_change_mtu,
3152         .ndo_set_mac_address    = cxgb_set_mac_addr,
3153         .ndo_fix_features       = cxgb_fix_features,
3154         .ndo_set_features       = cxgb_set_features,
3155 #ifdef CONFIG_NET_POLL_CONTROLLER
3156         .ndo_poll_controller    = cxgb_netpoll,
3157 #endif
3158 };
3159
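/*
 * Derive the port's iSCSI MAC address from its primary MAC by setting
 * the top bit of the fourth octet, giving the iSCSI function a unicast
 * address distinct from the NIC's.
 */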
3160 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3161 {
3162         struct port_info *pi = netdev_priv(dev);
3163
3164         memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3165         pi->iscsic.mac_addr[3] |= 0x80;
3166 }
3167
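/* TSO variants, and the feature subset propagated to VLAN sub-devices */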
3168 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3169 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3170                         NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3171 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3172 {
3173         int i, err, pci_using_dac = 0;
3174         resource_size_t mmio_start, mmio_len;
3175         const struct adapter_info *ai;
3176         struct adapter *adapter = NULL;
3177         struct port_info *pi;
3178
3179         pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3180
3181         if (!cxgb3_wq) {
3182                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3183                 if (!cxgb3_wq) {
3184                         pr_err("cannot initialize work queue\n");
3185                         return -ENOMEM;
3186                 }
3187         }
3188
3189         err = pci_enable_device(pdev);
3190         if (err) {
3191                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3192                 goto out;
3193         }
3194
3195         err = pci_request_regions(pdev, DRV_NAME);
3196         if (err) {
3197                 /* Just info, some other driver may have claimed the device. */
3198                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3199                 goto out_disable_device;
3200         }
3201
3202         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3203                 pci_using_dac = 1;
3204                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3205                 if (err) {
3206                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3207                                "coherent allocations\n");
3208                         goto out_release_regions;
3209                 }
3210         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3211                 dev_err(&pdev->dev, "no usable DMA configuration\n");
3212                 goto out_release_regions;
3213         }
3214
3215         pci_set_master(pdev);
3216         pci_save_state(pdev);
3217
3218         mmio_start = pci_resource_start(pdev, 0);
3219         mmio_len = pci_resource_len(pdev, 0);
3220         ai = t3_get_adapter_info(ent->driver_data);
3221
3222         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3223         if (!adapter) {
3224                 err = -ENOMEM;
3225                 goto out_release_regions;
3226         }
3227
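        /*
         * Pre-allocate an skb sized for a CPL set_tcb_field message so
         * one is guaranteed to be available when memory is tight.
         */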
3228         adapter->nofail_skb =
3229                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3230         if (!adapter->nofail_skb) {
3231                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3232                 err = -ENOMEM;
3233                 goto out_free_adapter;
3234         }
3235
3236         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3237         if (!adapter->regs) {
3238                 dev_err(&pdev->dev, "cannot map device registers\n");
3239                 err = -ENOMEM;
3240                 goto out_free_adapter;
3241         }
3242
3243         adapter->pdev = pdev;
3244         adapter->name = pci_name(pdev);
3245         adapter->msg_enable = dflt_msg_enable;
3246         adapter->mmio_len = mmio_len;
3247
3248         mutex_init(&adapter->mdio_lock);
3249         spin_lock_init(&adapter->work_lock);
3250         spin_lock_init(&adapter->stats_lock);
3251
3252         INIT_LIST_HEAD(&adapter->adapter_list);
3253         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3254         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3255
3256         INIT_WORK(&adapter->db_full_task, db_full_task);
3257         INIT_WORK(&adapter->db_empty_task, db_empty_task);
3258         INIT_WORK(&adapter->db_drop_task, db_drop_task);
3259
3260         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3261
3262         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3263                 struct net_device *netdev;
3264
3265                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3266                 if (!netdev) {
3267                         err = -ENOMEM;
3268                         goto out_free_dev;
3269                 }
3270
3271                 SET_NETDEV_DEV(netdev, &pdev->dev);
3272
3273                 adapter->port[i] = netdev;
3274                 pi = netdev_priv(netdev);
3275                 pi->adapter = adapter;
3276                 pi->port_id = i;
3277                 netif_carrier_off(netdev);
3278                 netdev->irq = pdev->irq;
3279                 netdev->mem_start = mmio_start;
3280                 netdev->mem_end = mmio_start + mmio_len - 1;
3281                 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3282                         NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3283                 netdev->features |= netdev->hw_features |
3284                                     NETIF_F_HW_VLAN_CTAG_TX;
3285                 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3286                 if (pci_using_dac)
3287                         netdev->features |= NETIF_F_HIGHDMA;
3288
3289                 netdev->netdev_ops = &cxgb_netdev_ops;
3290                 netdev->ethtool_ops = &cxgb_ethtool_ops;
3291         }
3292
3293         pci_set_drvdata(pdev, adapter);
3294         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3295                 err = -ENODEV;
3296                 goto out_free_dev;
3297         }
3298
3299         /*
3300          * The card is now ready to go.  If any errors occur during device
3301          * registration we do not fail the whole card but rather proceed only
3302          * with the ports we manage to register successfully.  However we must
3303          * register at least one net device.
3304          */
3305         for_each_port(adapter, i) {
3306                 err = register_netdev(adapter->port[i]);
3307                 if (err)
3308                         dev_warn(&pdev->dev,
3309                                  "cannot register net device %s, skipping\n",
3310                                  adapter->port[i]->name);
3311                 else {
3312                         /*
3313                          * Change the name we use for messages to the name of
3314                          * the first successfully registered interface.
3315                          */
3316                         if (!adapter->registered_device_map)
3317                                 adapter->name = adapter->port[i]->name;
3318
3319                         __set_bit(i, &adapter->registered_device_map);
3320                 }
3321         }
3322         if (!adapter->registered_device_map) {
3323                 dev_err(&pdev->dev, "could not register any net devices\n");
3324                 goto out_free_dev;
3325         }
3326
3327         for_each_port(adapter, i)
3328                 cxgb3_init_iscsi_mac(adapter->port[i]);
3329
3330         /* Driver's ready. Reflect it on LEDs */
3331         t3_led_ready(adapter);
3332
3333         if (is_offload(adapter)) {
3334                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3335                 cxgb3_adapter_ofld(adapter);
3336         }
3337
3338         /* See what interrupts we'll be using */
3339         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3340                 adapter->flags |= USING_MSIX;
3341         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3342                 adapter->flags |= USING_MSI;
3343
3344         set_nqsets(adapter);
3345
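        /*
         * Create the adapter's sysfs attributes on the first port; the
         * result is not checked, so a failure only leaves the files
         * missing.
         */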
3346         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3347                                  &cxgb3_attr_group);
3348
3349         print_port_info(adapter, ai);
3350         return 0;
3351
3352 out_free_dev:
3353         iounmap(adapter->regs);
3354         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3355                 if (adapter->port[i])
3356                         free_netdev(adapter->port[i]);
3357
3358 out_free_adapter:
3359         kfree(adapter);
3360
3361 out_release_regions:
3362         pci_release_regions(pdev);
3363 out_disable_device:
3364         pci_disable_device(pdev);
3365 out:
3366         return err;
3367 }
3368
3369 static void remove_one(struct pci_dev *pdev)
3370 {
3371         struct adapter *adapter = pci_get_drvdata(pdev);
3372
3373         if (adapter) {
3374                 int i;
3375
3376                 t3_sge_stop(adapter);
3377                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3378                                    &cxgb3_attr_group);
3379
3380                 if (is_offload(adapter)) {
3381                         cxgb3_adapter_unofld(adapter);
3382                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3383                                      &adapter->open_device_map))
3384                                 offload_close(&adapter->tdev);
3385                 }
3386
3387                 for_each_port(adapter, i)
3388                         if (test_bit(i, &adapter->registered_device_map))
3389                                 unregister_netdev(adapter->port[i]);
3390
3391                 t3_stop_sge_timers(adapter);
3392                 t3_free_sge_resources(adapter);
3393                 cxgb_disable_msi(adapter);
3394
3395                 for_each_port(adapter, i)
3396                         if (adapter->port[i])
3397                                 free_netdev(adapter->port[i]);
3398
3399                 iounmap(adapter->regs);
3400                 kfree_skb(adapter->nofail_skb);  /* kfree_skb() ignores NULL */
3402                 kfree(adapter);
3403                 pci_release_regions(pdev);
3404                 pci_disable_device(pdev);
3405         }
3406 }
3407
3408 static struct pci_driver driver = {
3409         .name = DRV_NAME,
3410         .id_table = cxgb3_pci_tbl,
3411         .probe = init_one,
3412         .remove = remove_one,
3413         .err_handler = &t3_err_handler,
3414 };
3415
3416 static int __init cxgb3_init_module(void)
3417 {
3418         cxgb3_offload_init();
3419
3420         return pci_register_driver(&driver);
3424 }
3425
3426 static void __exit cxgb3_cleanup_module(void)
3427 {
3428         pci_unregister_driver(&driver);
3429         if (cxgb3_wq)
3430                 destroy_workqueue(cxgb3_wq);
3431 }
3432
3433 module_init(cxgb3_init_module);
3434 module_exit(cxgb3_cleanup_module);