GNU Linux-libre 4.9-gnu1: drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1 /* bnx2x_cmn.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43         int i;
44
45         /* Add NAPI objects */
46         for_each_rx_queue_cnic(bp, i) {
47                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48                                bnx2x_poll, NAPI_POLL_WEIGHT);
49         }
50 }
51
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
53 {
54         int i;
55
56         /* Add NAPI objects */
57         for_each_eth_queue(bp, i) {
58                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59                                bnx2x_poll, NAPI_POLL_WEIGHT);
60         }
61 }
62
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 {
65         int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66
67         /* Reduce memory usage in kdump environment by using only one queue */
68         if (is_kdump_kernel())
69                 nq = 1;
70
71         nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72         return nq;
73 }
74
75 /**
76  * bnx2x_move_fp - move content of the fastpath structure.
77  *
78  * @bp:         driver handle
79  * @from:       source FP index
80  * @to:         destination FP index
81  *
82  * Makes sure the contents of bp->fp[to].napi are kept
83  * intact. This is done by first copying the napi struct from
84  * the target to the source, and then memcpying the entire
85  * source onto the target. Update txdata pointers and related
86  * content.
87  */
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 {
90         struct bnx2x_fastpath *from_fp = &bp->fp[from];
91         struct bnx2x_fastpath *to_fp = &bp->fp[to];
92         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96         int old_max_eth_txqs, new_max_eth_txqs;
97         int old_txdata_index = 0, new_txdata_index = 0;
98         struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99
100         /* Copy the NAPI object as it has been already initialized */
101         from_fp->napi = to_fp->napi;
102
103         /* Move bnx2x_fastpath contents */
104         memcpy(to_fp, from_fp, sizeof(*to_fp));
105         to_fp->index = to;
106
107         /* Retain the tpa_info of the original `to' version as we don't want
108          * 2 FPs to contain the same tpa_info pointer.
109          */
110         to_fp->tpa_info = old_tpa_info;
111
112         /* move sp_objs contents as well, as their indices match fp ones */
113         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115         /* move fp_stats contents as well, as their indices match fp ones */
116         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
118         /* Update txdata pointers in fp and move txdata content accordingly:
119          * Each fp consumes 'max_cos' txdata structures, so the index should be
120          * decremented by max_cos x delta.
121          */
122
123         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125                                 (bp)->max_cos;
126         if (from == FCOE_IDX(bp)) {
127                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129         }
130
131         memcpy(&bp->bnx2x_txq[new_txdata_index],
132                &bp->bnx2x_txq[old_txdata_index],
133                sizeof(struct bnx2x_fp_txdata));
134         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135 }
136
137 /**
138  * bnx2x_fill_fw_str - Fill buffer with FW version string.
139  *
140  * @bp:        driver handle
141  * @buf:       character buffer to fill with the fw name
142  * @buf_len:   length of the above buffer
143  *
144  */
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146 {
147         if (IS_PF(bp)) {
148                 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150                 phy_fw_ver[0] = '\0';
151                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152                                              phy_fw_ver, PHY_FW_VER_LEN);
153                 strlcpy(buf, bp->fw_ver, buf_len);
154                 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155                          "bc %d.%d.%d%s%s",
156                          (bp->common.bc_ver & 0xff0000) >> 16,
157                          (bp->common.bc_ver & 0xff00) >> 8,
158                          (bp->common.bc_ver & 0xff),
159                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160         } else {
161                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162         }
163 }
164
165 /**
166  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167  *
168  * @bp: driver handle
169  * @delta:      number of eth queues which were not allocated
170  */
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 {
173         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
175         /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
176          * backward along the array could cause memory to be overwritten
177          */
178         for (cos = 1; cos < bp->max_cos; cos++) {
179                 for (i = 0; i < old_eth_num - delta; i++) {
180                         struct bnx2x_fastpath *fp = &bp->fp[i];
181                         int new_idx = cos * (old_eth_num - delta) + i;
182
183                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184                                sizeof(struct bnx2x_fp_txdata));
185                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186                 }
187         }
188 }
189
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191
192 /* free skb in the packet ring at pos idx
193  * return idx of last bd freed
194  */
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196                              u16 idx, unsigned int *pkts_compl,
197                              unsigned int *bytes_compl)
198 {
199         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200         struct eth_tx_start_bd *tx_start_bd;
201         struct eth_tx_bd *tx_data_bd;
202         struct sk_buff *skb = tx_buf->skb;
203         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204         int nbd;
205         u16 split_bd_len = 0;
206
207         /* prefetch skb end pointer to speedup dev_kfree_skb() */
208         prefetch(&skb->end);
209
210         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
211            txdata->txq_index, idx, tx_buf, skb);
212
213         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214
215         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218                 BNX2X_ERR("BAD nbd!\n");
219                 bnx2x_panic();
220         }
221 #endif
222         new_cons = nbd + tx_buf->first_bd;
223
224         /* Get the next bd */
225         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227         /* Skip a parse bd... */
228         --nbd;
229         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
231         if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232                 /* Skip second parse bd... */
233                 --nbd;
234                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235         }
236
237         /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240                 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241                 --nbd;
242                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243         }
244
245         /* unmap first bd */
246         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247                          BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248                          DMA_TO_DEVICE);
249
250         /* now free frags */
251         while (nbd > 0) {
252
253                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256                 if (--nbd)
257                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258         }
259
260         /* release skb */
261         WARN_ON(!skb);
262         if (likely(skb)) {
263                 (*pkts_compl)++;
264                 (*bytes_compl) += skb->len;
265                 dev_kfree_skb_any(skb);
266         }
267
268         tx_buf->first_bd = 0;
269         tx_buf->skb = NULL;
270
271         return new_cons;
272 }
273
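/**
 * bnx2x_tx_int - reclaim BDs of completed TX packets for one txdata ring.
 *
 * @bp:         driver handle
 * @txdata:     TX queue data to service
 *
 * Walks the completion ring from the software consumer up to the HW
 * consumer, frees the corresponding skbs and DMA mappings, reports the
 * completed work to BQL, and wakes the netdev TX queue if it was stopped
 * and enough descriptors became available.
 */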
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 {
276         struct netdev_queue *txq;
277         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278         unsigned int pkts_compl = 0, bytes_compl = 0;
279
280 #ifdef BNX2X_STOP_ON_ERROR
281         if (unlikely(bp->panic))
282                 return -1;
283 #endif
284
285         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287         sw_cons = txdata->tx_pkt_cons;
288
289         while (sw_cons != hw_cons) {
290                 u16 pkt_cons;
291
292                 pkt_cons = TX_BD(sw_cons);
293
294                 DP(NETIF_MSG_TX_DONE,
295                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
296                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297
298                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
299                                             &pkts_compl, &bytes_compl);
300
301                 sw_cons++;
302         }
303
304         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305
306         txdata->tx_pkt_cons = sw_cons;
307         txdata->tx_bd_cons = bd_cons;
308
309         /* Need to make the tx_bd_cons update visible to start_xmit()
310          * before checking for netif_tx_queue_stopped().  Without the
311          * memory barrier, there is a small possibility that
312          * start_xmit() will miss it and cause the queue to be stopped
313          * forever.
314          * On the other hand we need an rmb() here to ensure the proper
315          * ordering of bit testing in the following
316          * netif_tx_queue_stopped(txq) call.
317          */
318         smp_mb();
319
320         if (unlikely(netif_tx_queue_stopped(txq))) {
321                 /* Taking tx_lock() is needed to prevent re-enabling the queue
322                  * while it's empty. This could happen if rx_action() gets
323                  * suspended in bnx2x_tx_int() after the condition before
324                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325                  *
326                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
327                  * sends some packets consuming the whole queue again->
328                  * stops the queue
329                  */
330
331                 __netif_tx_lock(txq, smp_processor_id());
332
333                 if ((netif_tx_queue_stopped(txq)) &&
334                     (bp->state == BNX2X_STATE_OPEN) &&
335                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336                         netif_tx_wake_queue(txq);
337
338                 __netif_tx_unlock(txq);
339         }
340         return 0;
341 }
342
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344                                              u16 idx)
345 {
346         u16 last_max = fp->last_max_sge;
347
348         if (SUB_S16(idx, last_max) > 0)
349                 fp->last_max_sge = idx;
350 }
351
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353                                          u16 sge_len,
354                                          struct eth_end_agg_rx_cqe *cqe)
355 {
356         struct bnx2x *bp = fp->bp;
357         u16 last_max, last_elem, first_elem;
358         u16 delta = 0;
359         u16 i;
360
361         if (!sge_len)
362                 return;
363
364         /* First mark all used pages */
365         for (i = 0; i < sge_len; i++)
366                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368
369         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371
372         /* Here we assume that the last SGE index is the biggest */
373         prefetch((void *)(fp->sge_mask));
374         bnx2x_update_last_max_sge(fp,
375                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377         last_max = RX_SGE(fp->last_max_sge);
378         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380
381         /* If ring is not full */
382         if (last_elem + 1 != first_elem)
383                 last_elem++;
384
385         /* Now update the prod */
386         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387                 if (likely(fp->sge_mask[i]))
388                         break;
389
390                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391                 delta += BIT_VEC64_ELEM_SZ;
392         }
393
394         if (delta > 0) {
395                 fp->rx_sge_prod += delta;
396                 /* clear page-end entries */
397                 bnx2x_clear_sge_mask_next_elems(fp);
398         }
399
400         DP(NETIF_MSG_RX_STATUS,
401            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
402            fp->last_max_sge, fp->rx_sge_prod);
403 }
404
405 /* Get Toeplitz hash value in the skb using the value from the
406  * CQE (calculated by HW).
407  */
408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
409                             const struct eth_fast_path_rx_cqe *cqe,
410                             enum pkt_hash_types *rxhash_type)
411 {
412         /* Get Toeplitz hash from CQE */
413         if ((bp->dev->features & NETIF_F_RXHASH) &&
414             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415                 enum eth_rss_hash_type htype;
416
417                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418                 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419                                 (htype == TCP_IPV6_HASH_TYPE)) ?
420                                PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421
422                 return le32_to_cpu(cqe->rss_hash_result);
423         }
424         *rxhash_type = PKT_HASH_TYPE_NONE;
425         return 0;
426 }
427
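/**
 * bnx2x_tpa_start - open a TPA aggregation bin on a TPA_START CQE.
 *
 * @fp:         fastpath structure
 * @queue:      aggregation queue (bin) index
 * @cons:       RX BD consumer index holding the first packet's buffer
 * @prod:       RX BD producer index to refill with an empty buffer
 * @cqe:        fast path CQE that started the aggregation
 *
 * Maps the bin's spare buffer onto the producer BD, moves the first
 * packet's buffer from the consumer into the bin, and records the parsing
 * flags, VLAN tag, hash and length needed when the aggregation is stopped.
 */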
428 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429                             u16 cons, u16 prod,
430                             struct eth_fast_path_rx_cqe *cqe)
431 {
432         struct bnx2x *bp = fp->bp;
433         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436         dma_addr_t mapping;
437         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439
440         /* print error if current state != stop */
441         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443
444         /* Try to map an empty data buffer from the aggregation info  */
445         mapping = dma_map_single(&bp->pdev->dev,
446                                  first_buf->data + NET_SKB_PAD,
447                                  fp->rx_buf_size, DMA_FROM_DEVICE);
448         /*
449          *  ...if it fails - move the skb from the consumer to the producer
450          *  and set the current aggregation state as ERROR to drop it
451          *  when TPA_STOP arrives.
452          */
453
454         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455                 /* Move the BD from the consumer to the producer */
456                 bnx2x_reuse_rx_data(fp, cons, prod);
457                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
458                 return;
459         }
460
461         /* move empty data from pool to prod */
462         prod_rx_buf->data = first_buf->data;
463         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
464         /* point prod_bd to new data */
465         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467
468         /* move partial skb from cons to pool (don't unmap yet) */
469         *first_buf = *cons_rx_buf;
470
471         /* mark bin state as START */
472         tpa_info->parsing_flags =
473                 le16_to_cpu(cqe->pars_flags.flags);
474         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475         tpa_info->tpa_state = BNX2X_TPA_START;
476         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477         tpa_info->placement_offset = cqe->placement_offset;
478         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479         if (fp->mode == TPA_MODE_GRO) {
480                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
481                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482                 tpa_info->gro_size = gro_size;
483         }
484
485 #ifdef BNX2X_STOP_ON_ERROR
486         fp->tpa_queue_used |= (1 << queue);
487         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
488            fp->tpa_queue_used);
489 #endif
490 }
491
492 /* Timestamp option length allowed for TPA aggregation:
493  *
494  *              nop nop kind length echo val
495  */
496 #define TPA_TSTAMP_OPT_LEN      12
497 /**
498  * bnx2x_set_gro_params - compute GRO values
499  *
500  * @skb:                packet skb
501  * @parsing_flags:      parsing flags from the START CQE
502  * @len_on_bd:          total length of the first packet for the
503  *                      aggregation.
504  * @pkt_len:            length of all segments
505  *
506  * Approximates the MSS for this aggregation, calculated from its
507  * first packet.
508  * Also computes the number of aggregated segments and the gso_type.
509  */
510 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511                                  u16 len_on_bd, unsigned int pkt_len,
512                                  u16 num_of_coalesced_segs)
513 {
514         /* TPA aggregation won't have either IP options or TCP options
515          * other than timestamp or IPv6 extension headers.
516          */
517         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518
519         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
520             PRS_FLAG_OVERETH_IPV6) {
521                 hdrs_len += sizeof(struct ipv6hdr);
522                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523         } else {
524                 hdrs_len += sizeof(struct iphdr);
525                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526         }
527
528         /* Check if there was a TCP timestamp; if there is, it will
529          * always be 12 bytes long: nop nop kind length echo val.
530          *
531          * Otherwise FW would close the aggregation.
532          */
533         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534                 hdrs_len += TPA_TSTAMP_OPT_LEN;
535
536         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537
538         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539          * to skb_shinfo(skb)->gso_segs
540          */
541         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
542 }
543
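/* Allocate (or carve from the per-queue page pool) one SGE buffer of
 * SGE_PAGE_SIZE bytes, map it for DMA and publish its address in the
 * SGE ring entry at @index.
 */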
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545                               u16 index, gfp_t gfp_mask)
546 {
547         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549         struct bnx2x_alloc_pool *pool = &fp->page_pool;
550         dma_addr_t mapping;
551
552         if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
553
554                 /* put page reference used by the memory pool, since we
555                  * won't be using this page as the mempool anymore.
556                  */
557                 if (pool->page)
558                         put_page(pool->page);
559
560                 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
561                 if (unlikely(!pool->page))
562                         return -ENOMEM;
563
564                 pool->offset = 0;
565         }
566
567         mapping = dma_map_page(&bp->pdev->dev, pool->page,
568                                pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
569         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
570                 BNX2X_ERR("Can't map sge\n");
571                 return -ENOMEM;
572         }
573
574         get_page(pool->page);
575         sw_buf->page = pool->page;
576         sw_buf->offset = pool->offset;
577
578         dma_unmap_addr_set(sw_buf, mapping, mapping);
579
580         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
581         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
582
583         pool->offset += SGE_PAGE_SIZE;
584
585         return 0;
586 }
587
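/* Attach the SGE pages of a completed aggregation to @skb as page
 * fragments: sets the GSO parameters (needed for forwarding), replaces
 * each consumed page in the SGE ring with a fresh one, and unmaps and
 * links the old pages into the skb.
 */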
588 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
589                                struct bnx2x_agg_info *tpa_info,
590                                u16 pages,
591                                struct sk_buff *skb,
592                                struct eth_end_agg_rx_cqe *cqe,
593                                u16 cqe_idx)
594 {
595         struct sw_rx_page *rx_pg, old_rx_pg;
596         u32 i, frag_len, frag_size;
597         int err, j, frag_id = 0;
598         u16 len_on_bd = tpa_info->len_on_bd;
599         u16 full_page = 0, gro_size = 0;
600
601         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
602
603         if (fp->mode == TPA_MODE_GRO) {
604                 gro_size = tpa_info->gro_size;
605                 full_page = tpa_info->full_page;
606         }
607
608         /* This is needed in order to enable forwarding support */
609         if (frag_size)
610                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
611                                      le16_to_cpu(cqe->pkt_len),
612                                      le16_to_cpu(cqe->num_of_coalesced_segs));
613
614 #ifdef BNX2X_STOP_ON_ERROR
615         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
616                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
617                           pages, cqe_idx);
618                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
619                 bnx2x_panic();
620                 return -EINVAL;
621         }
622 #endif
623
624         /* Run through the SGL and compose the fragmented skb */
625         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
626                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
627
628                 /* FW gives the indices of the SGE as if the ring is an array
629                    (meaning that "next" element will consume 2 indices) */
630                 if (fp->mode == TPA_MODE_GRO)
631                         frag_len = min_t(u32, frag_size, (u32)full_page);
632                 else /* LRO */
633                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
634
635                 rx_pg = &fp->rx_page_ring[sge_idx];
636                 old_rx_pg = *rx_pg;
637
638                 /* If we fail to allocate a substitute page, we simply stop
639                    where we are and drop the whole packet */
640                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
641                 if (unlikely(err)) {
642                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
643                         return err;
644                 }
645
646                 dma_unmap_page(&bp->pdev->dev,
647                                dma_unmap_addr(&old_rx_pg, mapping),
648                                SGE_PAGE_SIZE, DMA_FROM_DEVICE);
649                 /* Add one frag and update the appropriate fields in the skb */
650                 if (fp->mode == TPA_MODE_LRO)
651                         skb_fill_page_desc(skb, j, old_rx_pg.page,
652                                            old_rx_pg.offset, frag_len);
653                 else { /* GRO */
654                         int rem;
655                         int offset = 0;
656                         for (rem = frag_len; rem > 0; rem -= gro_size) {
657                                 int len = rem > gro_size ? gro_size : rem;
658                                 skb_fill_page_desc(skb, frag_id++,
659                                                    old_rx_pg.page,
660                                                    old_rx_pg.offset + offset,
661                                                    len);
662                                 if (offset)
663                                         get_page(old_rx_pg.page);
664                                 offset += len;
665                         }
666                 }
667
668                 skb->data_len += frag_len;
669                 skb->truesize += SGE_PAGES;
670                 skb->len += frag_len;
671
672                 frag_size -= frag_len;
673         }
674
675         return 0;
676 }
677
678 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
679 {
680         if (fp->rx_frag_size)
681                 skb_free_frag(data);
682         else
683                 kfree(data);
684 }
685
686 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
687 {
688         if (fp->rx_frag_size) {
689                 /* GFP_KERNEL allocations are used only during initialization */
690                 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
691                         return (void *)__get_free_page(gfp_mask);
692
693                 return netdev_alloc_frag(fp->rx_frag_size);
694         }
695
696         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
697 }
698
699 #ifdef CONFIG_INET
700 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
701 {
702         const struct iphdr *iph = ip_hdr(skb);
703         struct tcphdr *th;
704
705         skb_set_transport_header(skb, sizeof(struct iphdr));
706         th = tcp_hdr(skb);
707
708         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
709                                   iph->saddr, iph->daddr, 0);
710 }
711
712 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
713 {
714         struct ipv6hdr *iph = ipv6_hdr(skb);
715         struct tcphdr *th;
716
717         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
718         th = tcp_hdr(skb);
719
720         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
721                                   &iph->saddr, &iph->daddr, 0);
722 }
723
724 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
725                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
726 {
727         skb_set_network_header(skb, 0);
728         gro_func(bp, skb);
729         tcp_gro_complete(skb);
730 }
731 #endif
732
733 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
734                                struct sk_buff *skb)
735 {
736 #ifdef CONFIG_INET
737         if (skb_shinfo(skb)->gso_size) {
738                 switch (be16_to_cpu(skb->protocol)) {
739                 case ETH_P_IP:
740                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
741                         break;
742                 case ETH_P_IPV6:
743                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
744                         break;
745                 default:
746                         WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
747                                   be16_to_cpu(skb->protocol));
748                 }
749         }
750 #endif
751         skb_record_rx_queue(skb, fp->rx_queue);
752         napi_gro_receive(&fp->napi, skb);
753 }
754
755 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
756                            struct bnx2x_agg_info *tpa_info,
757                            u16 pages,
758                            struct eth_end_agg_rx_cqe *cqe,
759                            u16 cqe_idx)
760 {
761         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
762         u8 pad = tpa_info->placement_offset;
763         u16 len = tpa_info->len_on_bd;
764         struct sk_buff *skb = NULL;
765         u8 *new_data, *data = rx_buf->data;
766         u8 old_tpa_state = tpa_info->tpa_state;
767
768         tpa_info->tpa_state = BNX2X_TPA_STOP;
769
770         /* If there was an error during the handling of the TPA_START -
771          * drop this aggregation.
772          */
773         if (old_tpa_state == BNX2X_TPA_ERROR)
774                 goto drop;
775
776         /* Try to allocate the new data */
777         new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
778         /* Unmap skb in the pool anyway, as we are going to change
779            pool entry status to BNX2X_TPA_STOP even if new skb allocation
780            fails. */
781         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
782                          fp->rx_buf_size, DMA_FROM_DEVICE);
783         if (likely(new_data))
784                 skb = build_skb(data, fp->rx_frag_size);
785
786         if (likely(skb)) {
787 #ifdef BNX2X_STOP_ON_ERROR
788                 if (pad + len > fp->rx_buf_size) {
789                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
790                                   pad, len, fp->rx_buf_size);
791                         bnx2x_panic();
792                         return;
793                 }
794 #endif
795
796                 skb_reserve(skb, pad + NET_SKB_PAD);
797                 skb_put(skb, len);
798                 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
799
800                 skb->protocol = eth_type_trans(skb, bp->dev);
801                 skb->ip_summed = CHECKSUM_UNNECESSARY;
802
803                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
804                                          skb, cqe, cqe_idx)) {
805                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
806                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
807                         bnx2x_gro_receive(bp, fp, skb);
808                 } else {
809                         DP(NETIF_MSG_RX_STATUS,
810                            "Failed to allocate new pages - dropping packet!\n");
811                         dev_kfree_skb_any(skb);
812                 }
813
814                 /* put new data in bin */
815                 rx_buf->data = new_data;
816
817                 return;
818         }
819         if (new_data)
820                 bnx2x_frag_free(fp, new_data);
821 drop:
822         /* drop the packet and keep the buffer in the bin */
823         DP(NETIF_MSG_RX_STATUS,
824            "Failed to allocate or map a new skb - dropping packet!\n");
825         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
826 }
827
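/* Allocate a new RX data buffer, map it for DMA and publish its address
 * in the RX BD at @index.
 */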
828 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
829                                u16 index, gfp_t gfp_mask)
830 {
831         u8 *data;
832         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
833         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
834         dma_addr_t mapping;
835
836         data = bnx2x_frag_alloc(fp, gfp_mask);
837         if (unlikely(data == NULL))
838                 return -ENOMEM;
839
840         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
841                                  fp->rx_buf_size,
842                                  DMA_FROM_DEVICE);
843         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
844                 bnx2x_frag_free(fp, data);
845                 BNX2X_ERR("Can't map rx data\n");
846                 return -ENOMEM;
847         }
848
849         rx_buf->data = data;
850         dma_unmap_addr_set(rx_buf, mapping, mapping);
851
852         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
853         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
854
855         return 0;
856 }
857
858 static
859 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
860                                  struct bnx2x_fastpath *fp,
861                                  struct bnx2x_eth_q_stats *qstats)
862 {
863         /* Do nothing if no L4 csum validation was done.
864          * We do not check whether IP csum was validated. For IPv4 we assume
865          * that if the card got as far as validating the L4 csum, it also
866          * validated the IP csum. IPv6 has no IP csum.
867          */
868         if (cqe->fast_path_cqe.status_flags &
869             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
870                 return;
871
872         /* If L4 validation was done, check if an error was found. */
873
874         if (cqe->fast_path_cqe.type_error_flags &
875             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
876              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
877                 qstats->hw_csum_err++;
878         else
879                 skb->ip_summed = CHECKSUM_UNNECESSARY;
880 }
881
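/* Main RX fast-path poll routine: processes up to @budget completion
 * entries from the RCQ, dispatching slow-path events, TPA start/stop
 * aggregations and regular packets to the stack, then updates the BD,
 * CQE and SGE producers for the firmware.
 */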
882 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
883 {
884         struct bnx2x *bp = fp->bp;
885         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
886         u16 sw_comp_cons, sw_comp_prod;
887         int rx_pkt = 0;
888         union eth_rx_cqe *cqe;
889         struct eth_fast_path_rx_cqe *cqe_fp;
890
891 #ifdef BNX2X_STOP_ON_ERROR
892         if (unlikely(bp->panic))
893                 return 0;
894 #endif
895         if (budget <= 0)
896                 return rx_pkt;
897
898         bd_cons = fp->rx_bd_cons;
899         bd_prod = fp->rx_bd_prod;
900         bd_prod_fw = bd_prod;
901         sw_comp_cons = fp->rx_comp_cons;
902         sw_comp_prod = fp->rx_comp_prod;
903
904         comp_ring_cons = RCQ_BD(sw_comp_cons);
905         cqe = &fp->rx_comp_ring[comp_ring_cons];
906         cqe_fp = &cqe->fast_path_cqe;
907
908         DP(NETIF_MSG_RX_STATUS,
909            "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
910
911         while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
912                 struct sw_rx_bd *rx_buf = NULL;
913                 struct sk_buff *skb;
914                 u8 cqe_fp_flags;
915                 enum eth_rx_cqe_type cqe_fp_type;
916                 u16 len, pad, queue;
917                 u8 *data;
918                 u32 rxhash;
919                 enum pkt_hash_types rxhash_type;
920
921 #ifdef BNX2X_STOP_ON_ERROR
922                 if (unlikely(bp->panic))
923                         return 0;
924 #endif
925
926                 bd_prod = RX_BD(bd_prod);
927                 bd_cons = RX_BD(bd_cons);
928
929                 /* A rmb() is required to ensure that the CQE is not read
930                  * before it is written by the adapter DMA.  PCI ordering
931                  * rules will make sure the other fields are written before
932                  * the marker at the end of struct eth_fast_path_rx_cqe
933                  * but without rmb() a weakly ordered processor can process
934                  * stale data.  Without the barrier TPA state-machine might
935                  * enter inconsistent state and kernel stack might be
936                  * provided with incorrect packet description - these lead
937                  * to various kernel crashes.
938                  */
939                 rmb();
940
941                 cqe_fp_flags = cqe_fp->type_error_flags;
942                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
943
944                 DP(NETIF_MSG_RX_STATUS,
945                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
946                    CQE_TYPE(cqe_fp_flags),
947                    cqe_fp_flags, cqe_fp->status_flags,
948                    le32_to_cpu(cqe_fp->rss_hash_result),
949                    le16_to_cpu(cqe_fp->vlan_tag),
950                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
951
952                 /* is this a slowpath msg? */
953                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
954                         bnx2x_sp_event(fp, cqe);
955                         goto next_cqe;
956                 }
957
958                 rx_buf = &fp->rx_buf_ring[bd_cons];
959                 data = rx_buf->data;
960
961                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
962                         struct bnx2x_agg_info *tpa_info;
963                         u16 frag_size, pages;
964 #ifdef BNX2X_STOP_ON_ERROR
965                         /* sanity check */
966                         if (fp->mode == TPA_MODE_DISABLED &&
967                             (CQE_TYPE_START(cqe_fp_type) ||
968                              CQE_TYPE_STOP(cqe_fp_type)))
969                                 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
970                                           CQE_TYPE(cqe_fp_type));
971 #endif
972
973                         if (CQE_TYPE_START(cqe_fp_type)) {
974                                 u16 queue = cqe_fp->queue_index;
975                                 DP(NETIF_MSG_RX_STATUS,
976                                    "calling tpa_start on queue %d\n",
977                                    queue);
978
979                                 bnx2x_tpa_start(fp, queue,
980                                                 bd_cons, bd_prod,
981                                                 cqe_fp);
982
983                                 goto next_rx;
984                         }
985                         queue = cqe->end_agg_cqe.queue_index;
986                         tpa_info = &fp->tpa_info[queue];
987                         DP(NETIF_MSG_RX_STATUS,
988                            "calling tpa_stop on queue %d\n",
989                            queue);
990
991                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
992                                     tpa_info->len_on_bd;
993
994                         if (fp->mode == TPA_MODE_GRO)
995                                 pages = (frag_size + tpa_info->full_page - 1) /
996                                          tpa_info->full_page;
997                         else
998                                 pages = SGE_PAGE_ALIGN(frag_size) >>
999                                         SGE_PAGE_SHIFT;
1000
1001                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1002                                        &cqe->end_agg_cqe, comp_ring_cons);
1003 #ifdef BNX2X_STOP_ON_ERROR
1004                         if (bp->panic)
1005                                 return 0;
1006 #endif
1007
1008                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1009                         goto next_cqe;
1010                 }
1011                 /* non TPA */
1012                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1013                 pad = cqe_fp->placement_offset;
1014                 dma_sync_single_for_cpu(&bp->pdev->dev,
1015                                         dma_unmap_addr(rx_buf, mapping),
1016                                         pad + RX_COPY_THRESH,
1017                                         DMA_FROM_DEVICE);
1018                 pad += NET_SKB_PAD;
1019                 prefetch(data + pad); /* speedup eth_type_trans() */
1020                 /* is this an error packet? */
1021                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1022                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1023                            "ERROR  flags %x  rx packet %u\n",
1024                            cqe_fp_flags, sw_comp_cons);
1025                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1026                         goto reuse_rx;
1027                 }
1028
1029                 /* Since we don't have a jumbo ring
1030                  * copy small packets if mtu > 1500
1031                  */
1032                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1033                     (len <= RX_COPY_THRESH)) {
1034                         skb = napi_alloc_skb(&fp->napi, len);
1035                         if (skb == NULL) {
1036                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1037                                    "ERROR  packet dropped because of alloc failure\n");
1038                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1039                                 goto reuse_rx;
1040                         }
1041                         memcpy(skb->data, data + pad, len);
1042                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1043                 } else {
1044                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1045                                                        GFP_ATOMIC) == 0)) {
1046                                 dma_unmap_single(&bp->pdev->dev,
1047                                                  dma_unmap_addr(rx_buf, mapping),
1048                                                  fp->rx_buf_size,
1049                                                  DMA_FROM_DEVICE);
1050                                 skb = build_skb(data, fp->rx_frag_size);
1051                                 if (unlikely(!skb)) {
1052                                         bnx2x_frag_free(fp, data);
1053                                         bnx2x_fp_qstats(bp, fp)->
1054                                                         rx_skb_alloc_failed++;
1055                                         goto next_rx;
1056                                 }
1057                                 skb_reserve(skb, pad);
1058                         } else {
1059                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1060                                    "ERROR  packet dropped because of alloc failure\n");
1061                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1062 reuse_rx:
1063                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1064                                 goto next_rx;
1065                         }
1066                 }
1067
1068                 skb_put(skb, len);
1069                 skb->protocol = eth_type_trans(skb, bp->dev);
1070
1071                 /* Set Toeplitz hash for a non-LRO skb */
1072                 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1073                 skb_set_hash(skb, rxhash, rxhash_type);
1074
1075                 skb_checksum_none_assert(skb);
1076
1077                 if (bp->dev->features & NETIF_F_RXCSUM)
1078                         bnx2x_csum_validate(skb, cqe, fp,
1079                                             bnx2x_fp_qstats(bp, fp));
1080
1081                 skb_record_rx_queue(skb, fp->rx_queue);
1082
1083                 /* Check if this packet was timestamped */
1084                 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1085                              (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1086                         bnx2x_set_rx_ts(bp, skb);
1087
1088                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1089                     PARSING_FLAGS_VLAN)
1090                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1091                                                le16_to_cpu(cqe_fp->vlan_tag));
1092
1093                 napi_gro_receive(&fp->napi, skb);
1094 next_rx:
1095                 rx_buf->data = NULL;
1096
1097                 bd_cons = NEXT_RX_IDX(bd_cons);
1098                 bd_prod = NEXT_RX_IDX(bd_prod);
1099                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1100                 rx_pkt++;
1101 next_cqe:
1102                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1103                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1104
1105                 /* mark CQE as free */
1106                 BNX2X_SEED_CQE(cqe_fp);
1107
1108                 if (rx_pkt == budget)
1109                         break;
1110
1111                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1112                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1113                 cqe_fp = &cqe->fast_path_cqe;
1114         } /* while */
1115
1116         fp->rx_bd_cons = bd_cons;
1117         fp->rx_bd_prod = bd_prod_fw;
1118         fp->rx_comp_cons = sw_comp_cons;
1119         fp->rx_comp_prod = sw_comp_prod;
1120
1121         /* Update producers */
1122         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1123                              fp->rx_sge_prod);
1124
1125         return rx_pkt;
1126 }
1127
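/* MSI-X fastpath interrupt handler: acknowledges the status block,
 * disables further interrupts for this SB and schedules the queue's
 * NAPI context to do the actual RX/TX work.
 */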
1128 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1129 {
1130         struct bnx2x_fastpath *fp = fp_cookie;
1131         struct bnx2x *bp = fp->bp;
1132         u8 cos;
1133
1134         DP(NETIF_MSG_INTR,
1135            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1136            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1137
1138         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1139
1140 #ifdef BNX2X_STOP_ON_ERROR
1141         if (unlikely(bp->panic))
1142                 return IRQ_HANDLED;
1143 #endif
1144
1145         /* Handle Rx and Tx according to MSI-X vector */
1146         for_each_cos_in_tx_queue(fp, cos)
1147                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1148
1149         prefetch(&fp->sb_running_index[SM_RX_ID]);
1150         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1151
1152         return IRQ_HANDLED;
1153 }
1154
1155 /* HW Lock for shared dual port PHYs */
1156 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1157 {
1158         mutex_lock(&bp->port.phy_mutex);
1159
1160         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1161 }
1162
1163 void bnx2x_release_phy_lock(struct bnx2x *bp)
1164 {
1165         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1166
1167         mutex_unlock(&bp->port.phy_mutex);
1168 }
1169
1170 /* calculates MF speed according to current linespeed and MF configuration */
1171 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1172 {
1173         u16 line_speed = bp->link_vars.line_speed;
1174         if (IS_MF(bp)) {
1175                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1176                                                    bp->mf_config[BP_VN(bp)]);
1177
1178                 /* Calculate the current MAX line speed limit for the MF
1179                  * devices
1180                  */
1181                 if (IS_MF_PERCENT_BW(bp))
1182                         line_speed = (line_speed * maxCfg) / 100;
1183                 else { /* SD mode */
1184                         u16 vn_max_rate = maxCfg * 100;
1185
1186                         if (vn_max_rate < line_speed)
1187                                 line_speed = vn_max_rate;
1188                 }
1189         }
1190
1191         return line_speed;
1192 }
1193
1194 /**
1195  * bnx2x_fill_report_data - fill link report data to report
1196  *
1197  * @bp:         driver handle
1198  * @data:       link state to update
1199  *
1200  * It uses non-atomic bit operations because it is called under the mutex.
1201  */
1202 static void bnx2x_fill_report_data(struct bnx2x *bp,
1203                                    struct bnx2x_link_report_data *data)
1204 {
1205         memset(data, 0, sizeof(*data));
1206
1207         if (IS_PF(bp)) {
1208                 /* Fill the report data: effective line speed */
1209                 data->line_speed = bnx2x_get_mf_speed(bp);
1210
1211                 /* Link is down */
1212                 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1213                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1214                                   &data->link_report_flags);
1215
1216                 if (!BNX2X_NUM_ETH_QUEUES(bp))
1217                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1218                                   &data->link_report_flags);
1219
1220                 /* Full DUPLEX */
1221                 if (bp->link_vars.duplex == DUPLEX_FULL)
1222                         __set_bit(BNX2X_LINK_REPORT_FD,
1223                                   &data->link_report_flags);
1224
1225                 /* Rx Flow Control is ON */
1226                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1227                         __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1228                                   &data->link_report_flags);
1229
1230                 /* Tx Flow Control is ON */
1231                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1232                         __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1233                                   &data->link_report_flags);
1234         } else { /* VF */
1235                 *data = bp->vf_link_vars;
1236         }
1237 }
1238
1239 /**
1240  * bnx2x_link_report - report link status to OS.
1241  *
1242  * @bp:         driver handle
1243  *
1244  * Calls the __bnx2x_link_report() under the same locking scheme
1245  * as the link/PHY state managing code to ensure consistent link
1246  * reporting.
1247  */
1248
1249 void bnx2x_link_report(struct bnx2x *bp)
1250 {
1251         bnx2x_acquire_phy_lock(bp);
1252         __bnx2x_link_report(bp);
1253         bnx2x_release_phy_lock(bp);
1254 }
1255
1256 /**
1257  * __bnx2x_link_report - report link status to OS.
1258  *
1259  * @bp:         driver handle
1260  *
1261  * Non-atomic implementation.
1262  * Should be called under the phy_lock.
1263  */
1264 void __bnx2x_link_report(struct bnx2x *bp)
1265 {
1266         struct bnx2x_link_report_data cur_data;
1267
1268         /* reread mf_cfg */
1269         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1270                 bnx2x_read_mf_cfg(bp);
1271
1272         /* Read the current link report info */
1273         bnx2x_fill_report_data(bp, &cur_data);
1274
1275         /* Don't report link down or exactly the same link status twice */
1276         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1277             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1278                       &bp->last_reported_link.link_report_flags) &&
1279              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1280                       &cur_data.link_report_flags)))
1281                 return;
1282
1283         bp->link_cnt++;
1284
1285         /* We are going to report new link parameters now -
1286          * remember the current data for the next time.
1287          */
1288         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1289
1290         /* propagate status to VFs */
1291         if (IS_PF(bp))
1292                 bnx2x_iov_link_update(bp);
1293
1294         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1295                      &cur_data.link_report_flags)) {
1296                 netif_carrier_off(bp->dev);
1297                 netdev_err(bp->dev, "NIC Link is Down\n");
1298                 return;
1299         } else {
1300                 const char *duplex;
1301                 const char *flow;
1302
1303                 netif_carrier_on(bp->dev);
1304
1305                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1306                                        &cur_data.link_report_flags))
1307                         duplex = "full";
1308                 else
1309                         duplex = "half";
1310
1311                 /* Handle the FC at the end so that only these flags could
1312                  * possibly be set. This way we can easily check whether any FC
1313                  * is enabled.
1314                  */
1315                 if (cur_data.link_report_flags) {
1316                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1317                                      &cur_data.link_report_flags)) {
1318                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1319                                      &cur_data.link_report_flags))
1320                                         flow = "ON - receive & transmit";
1321                                 else
1322                                         flow = "ON - receive";
1323                         } else {
1324                                 flow = "ON - transmit";
1325                         }
1326                 } else {
1327                         flow = "none";
1328                 }
1329                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1330                             cur_data.line_speed, duplex, flow);
1331         }
1332 }
1333
1334 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1335 {
1336         int i;
1337
1338         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1339                 struct eth_rx_sge *sge;
1340
1341                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1342                 sge->addr_hi =
1343                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1344                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1345
1346                 sge->addr_lo =
1347                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1348                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1349         }
1350 }
1351
1352 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1353                                 struct bnx2x_fastpath *fp, int last)
1354 {
1355         int i;
1356
1357         for (i = 0; i < last; i++) {
1358                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1359                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1360                 u8 *data = first_buf->data;
1361
1362                 if (data == NULL) {
1363                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1364                         continue;
1365                 }
1366                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1367                         dma_unmap_single(&bp->pdev->dev,
1368                                          dma_unmap_addr(first_buf, mapping),
1369                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1370                 bnx2x_frag_free(fp, data);
1371                 first_buf->data = NULL;
1372         }
1373 }
1374
1375 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1376 {
1377         int j;
1378
1379         for_each_rx_queue_cnic(bp, j) {
1380                 struct bnx2x_fastpath *fp = &bp->fp[j];
1381
1382                 fp->rx_bd_cons = 0;
1383
1384                 /* Activate BD ring */
1385                 /* Warning!
1386                  * This will generate an interrupt (to the TSTORM);
1387                  * it must only be done after the chip is initialized.
1388                  */
1389                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1390                                      fp->rx_sge_prod);
1391         }
1392 }
1393
1394 void bnx2x_init_rx_rings(struct bnx2x *bp)
1395 {
1396         int func = BP_FUNC(bp);
1397         u16 ring_prod;
1398         int i, j;
1399
1400         /* Allocate TPA resources */
1401         for_each_eth_queue(bp, j) {
1402                 struct bnx2x_fastpath *fp = &bp->fp[j];
1403
1404                 DP(NETIF_MSG_IFUP,
1405                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1406
1407                 if (fp->mode != TPA_MODE_DISABLED) {
1408                         /* Fill the per-aggregation pool */
1409                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1410                                 struct bnx2x_agg_info *tpa_info =
1411                                         &fp->tpa_info[i];
1412                                 struct sw_rx_bd *first_buf =
1413                                         &tpa_info->first_buf;
1414
1415                                 first_buf->data =
1416                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1417                                 if (!first_buf->data) {
1418                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1419                                                   j);
1420                                         bnx2x_free_tpa_pool(bp, fp, i);
1421                                         fp->mode = TPA_MODE_DISABLED;
1422                                         break;
1423                                 }
1424                                 dma_unmap_addr_set(first_buf, mapping, 0);
1425                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1426                         }
1427
1428                         /* "next page" elements initialization */
1429                         bnx2x_set_next_page_sgl(fp);
1430
1431                         /* set SGEs bit mask */
1432                         bnx2x_init_sge_ring_bit_mask(fp);
1433
1434                         /* Allocate SGEs and initialize the ring elements */
1435                         for (i = 0, ring_prod = 0;
1436                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1437
1438                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1439                                                        GFP_KERNEL) < 0) {
1440                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1441                                                   i);
1442                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1443                                                   j);
1444                                         /* Cleanup already allocated elements */
1445                                         bnx2x_free_rx_sge_range(bp, fp,
1446                                                                 ring_prod);
1447                                         bnx2x_free_tpa_pool(bp, fp,
1448                                                             MAX_AGG_QS(bp));
1449                                         fp->mode = TPA_MODE_DISABLED;
1450                                         ring_prod = 0;
1451                                         break;
1452                                 }
1453                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1454                         }
1455
1456                         fp->rx_sge_prod = ring_prod;
1457                 }
1458         }
1459
1460         for_each_eth_queue(bp, j) {
1461                 struct bnx2x_fastpath *fp = &bp->fp[j];
1462
1463                 fp->rx_bd_cons = 0;
1464
1465                 /* Activate BD ring */
1466                 /* Warning!
1467                  * This will generate an interrupt (to the TSTORM);
1468                  * it must only be done after the chip is initialized.
1469                  */
1470                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1471                                      fp->rx_sge_prod);
1472
1473                 if (j != 0)
1474                         continue;
1475
1476                 if (CHIP_IS_E1(bp)) {
1477                         REG_WR(bp, BAR_USTRORM_INTMEM +
1478                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1479                                U64_LO(fp->rx_comp_mapping));
1480                         REG_WR(bp, BAR_USTRORM_INTMEM +
1481                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1482                                U64_HI(fp->rx_comp_mapping));
1483                 }
1484         }
1485 }
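/* Free every Tx packet still pending between the consumer and producer
 * indices of each CoS Tx ring of a fastpath, then reset the corresponding
 * netdev Tx queue (BQL) accounting.
 */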
1486
1487 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1488 {
1489         u8 cos;
1490         struct bnx2x *bp = fp->bp;
1491
1492         for_each_cos_in_tx_queue(fp, cos) {
1493                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1494                 unsigned pkts_compl = 0, bytes_compl = 0;
1495
1496                 u16 sw_prod = txdata->tx_pkt_prod;
1497                 u16 sw_cons = txdata->tx_pkt_cons;
1498
1499                 while (sw_cons != sw_prod) {
1500                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1501                                           &pkts_compl, &bytes_compl);
1502                         sw_cons++;
1503                 }
1504
1505                 netdev_tx_reset_queue(
1506                         netdev_get_tx_queue(bp->dev,
1507                                             txdata->txq_index));
1508         }
1509 }
1510
1511 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1512 {
1513         int i;
1514
1515         for_each_tx_queue_cnic(bp, i) {
1516                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1517         }
1518 }
1519
1520 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1521 {
1522         int i;
1523
1524         for_each_eth_queue(bp, i) {
1525                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1526         }
1527 }
1528
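/* Unmap and free all Rx buffers of a fastpath BD ring; slots that were never
 * filled (data == NULL) are skipped.
 */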
1529 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1530 {
1531         struct bnx2x *bp = fp->bp;
1532         int i;
1533
1534         /* ring wasn't allocated */
1535         if (fp->rx_buf_ring == NULL)
1536                 return;
1537
1538         for (i = 0; i < NUM_RX_BD; i++) {
1539                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1540                 u8 *data = rx_buf->data;
1541
1542                 if (data == NULL)
1543                         continue;
1544                 dma_unmap_single(&bp->pdev->dev,
1545                                  dma_unmap_addr(rx_buf, mapping),
1546                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1547
1548                 rx_buf->data = NULL;
1549                 bnx2x_frag_free(fp, data);
1550         }
1551 }
1552
1553 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1554 {
1555         int j;
1556
1557         for_each_rx_queue_cnic(bp, j) {
1558                 bnx2x_free_rx_bds(&bp->fp[j]);
1559         }
1560 }
1561
1562 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1563 {
1564         int j;
1565
1566         for_each_eth_queue(bp, j) {
1567                 struct bnx2x_fastpath *fp = &bp->fp[j];
1568
1569                 bnx2x_free_rx_bds(fp);
1570
1571                 if (fp->mode != TPA_MODE_DISABLED)
1572                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1573         }
1574 }
1575
1576 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1577 {
1578         bnx2x_free_tx_skbs_cnic(bp);
1579         bnx2x_free_rx_skbs_cnic(bp);
1580 }
1581
1582 void bnx2x_free_skbs(struct bnx2x *bp)
1583 {
1584         bnx2x_free_tx_skbs(bp);
1585         bnx2x_free_rx_skbs(bp);
1586 }
1587
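/* Update the MAX bandwidth field of this function's MF configuration and
 * notify the MCP, but only if the requested value differs from the currently
 * configured one.
 */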
1588 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1589 {
1590         /* load old values */
1591         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1592
1593         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1594                 /* leave all but MAX value */
1595                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1596
1597                 /* set new MAX value */
1598                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1599                                 & FUNC_MF_CFG_MAX_BW_MASK;
1600
1601                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1602         }
1603 }
1604
1605 /**
1606  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1607  *
1608  * @bp:         driver handle
1609  * @nvecs:      number of vectors to be released
1610  */
1611 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1612 {
1613         int i, offset = 0;
1614
1615         if (nvecs == offset)
1616                 return;
1617
1618         /* VFs don't have a default SB */
1619         if (IS_PF(bp)) {
1620                 free_irq(bp->msix_table[offset].vector, bp->dev);
1621                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1622                    bp->msix_table[offset].vector);
1623                 offset++;
1624         }
1625
1626         if (CNIC_SUPPORT(bp)) {
1627                 if (nvecs == offset)
1628                         return;
1629                 offset++;
1630         }
1631
1632         for_each_eth_queue(bp, i) {
1633                 if (nvecs == offset)
1634                         return;
1635                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1636                    i, bp->msix_table[offset].vector);
1637
1638                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1639         }
1640 }
1641
1642 void bnx2x_free_irq(struct bnx2x *bp)
1643 {
1644         if (bp->flags & USING_MSIX_FLAG &&
1645             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1646                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1647
1648                 /* vfs don't have a default status block */
1649                 if (IS_PF(bp))
1650                         nvecs++;
1651
1652                 bnx2x_free_msix_irqs(bp, nvecs);
1653         } else {
1654                 free_irq(bp->dev->irq, bp->dev);
1655         }
1656 }
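/* Build the MSI-X table and enable MSI-X. The vector layout is:
 *   [0]            slowpath / default status block (PF only)
 *   [next]         CNIC, if supported
 *   [remaining]    one vector per ETH fastpath queue
 * If the full range cannot be granted, the number of ETH queues is shrunk to
 * match the granted vectors; as a last resort a single MSI-X vector is used.
 */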
1657
1658 int bnx2x_enable_msix(struct bnx2x *bp)
1659 {
1660         int msix_vec = 0, i, rc;
1661
1662         /* VFs don't have a default status block */
1663         if (IS_PF(bp)) {
1664                 bp->msix_table[msix_vec].entry = msix_vec;
1665                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1666                                bp->msix_table[0].entry);
1667                 msix_vec++;
1668         }
1669
1670         /* Cnic requires an msix vector for itself */
1671         if (CNIC_SUPPORT(bp)) {
1672                 bp->msix_table[msix_vec].entry = msix_vec;
1673                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1674                                msix_vec, bp->msix_table[msix_vec].entry);
1675                 msix_vec++;
1676         }
1677
1678         /* We need separate vectors for ETH queues only (not FCoE) */
1679         for_each_eth_queue(bp, i) {
1680                 bp->msix_table[msix_vec].entry = msix_vec;
1681                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1682                                msix_vec, msix_vec, i);
1683                 msix_vec++;
1684         }
1685
1686         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1687            msix_vec);
1688
1689         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1690                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1691         /*
1692          * reconfigure number of tx/rx queues according to available
1693          * MSI-X vectors
1694          */
1695         if (rc == -ENOSPC) {
1696                 /* Get by with single vector */
1697                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1698                 if (rc < 0) {
1699                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1700                                        rc);
1701                         goto no_msix;
1702                 }
1703
1704                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1705                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1706
1707                 BNX2X_DEV_INFO("set number of queues to 1\n");
1708                 bp->num_ethernet_queues = 1;
1709                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1710         } else if (rc < 0) {
1711                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1712                 goto no_msix;
1713         } else if (rc < msix_vec) {
1714                 /* how many fewer vectors do we have? */
1715                 int diff = msix_vec - rc;
1716
1717                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1718
1719                 /*
1720                  * decrease number of queues by number of unallocated entries
1721                  */
1722                 bp->num_ethernet_queues -= diff;
1723                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1724
1725                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1726                                bp->num_queues);
1727         }
1728
1729         bp->flags |= USING_MSIX_FLAG;
1730
1731         return 0;
1732
1733 no_msix:
1734         /* fall back to INTx if there is not enough memory for MSI-X */
1735         if (rc == -ENOMEM)
1736                 bp->flags |= DISABLE_MSI_FLAG;
1737
1738         return rc;
1739 }
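/* Request the previously enabled MSI-X vectors in the order they were laid
 * out in the table: the slowpath vector (PF only), a slot skipped for CNIC if
 * supported, then one vector per ETH fastpath queue.
 */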
1740
1741 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1742 {
1743         int i, rc, offset = 0;
1744
1745         /* no default status block for vf */
1746         if (IS_PF(bp)) {
1747                 rc = request_irq(bp->msix_table[offset++].vector,
1748                                  bnx2x_msix_sp_int, 0,
1749                                  bp->dev->name, bp->dev);
1750                 if (rc) {
1751                         BNX2X_ERR("request sp irq failed\n");
1752                         return -EBUSY;
1753                 }
1754         }
1755
1756         if (CNIC_SUPPORT(bp))
1757                 offset++;
1758
1759         for_each_eth_queue(bp, i) {
1760                 struct bnx2x_fastpath *fp = &bp->fp[i];
1761                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1762                          bp->dev->name, i);
1763
1764                 rc = request_irq(bp->msix_table[offset].vector,
1765                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1766                 if (rc) {
1767                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1768                               bp->msix_table[offset].vector, rc);
1769                         bnx2x_free_msix_irqs(bp, offset);
1770                         return -EBUSY;
1771                 }
1772
1773                 offset++;
1774         }
1775
1776         i = BNX2X_NUM_ETH_QUEUES(bp);
1777         if (IS_PF(bp)) {
1778                 offset = 1 + CNIC_SUPPORT(bp);
1779                 netdev_info(bp->dev,
1780                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1781                             bp->msix_table[0].vector,
1782                             0, bp->msix_table[offset].vector,
1783                             i - 1, bp->msix_table[offset + i - 1].vector);
1784         } else {
1785                 offset = CNIC_SUPPORT(bp);
1786                 netdev_info(bp->dev,
1787                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1788                             0, bp->msix_table[offset].vector,
1789                             i - 1, bp->msix_table[offset + i - 1].vector);
1790         }
1791         return 0;
1792 }
1793
1794 int bnx2x_enable_msi(struct bnx2x *bp)
1795 {
1796         int rc;
1797
1798         rc = pci_enable_msi(bp->pdev);
1799         if (rc) {
1800                 BNX2X_DEV_INFO("MSI is not attainable\n");
1801                 return -1;
1802         }
1803         bp->flags |= USING_MSI_FLAG;
1804
1805         return 0;
1806 }
1807
1808 static int bnx2x_req_irq(struct bnx2x *bp)
1809 {
1810         unsigned long flags;
1811         unsigned int irq;
1812
1813         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1814                 flags = 0;
1815         else
1816                 flags = IRQF_SHARED;
1817
1818         if (bp->flags & USING_MSIX_FLAG)
1819                 irq = bp->msix_table[0].vector;
1820         else
1821                 irq = bp->pdev->irq;
1822
1823         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1824 }
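/* Attach interrupt handlers according to the interrupt mode in use: per-queue
 * MSI-X handlers, or a single MSI / single MSI-X / INTx line serviced by
 * bnx2x_interrupt().
 */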
1825
1826 static int bnx2x_setup_irqs(struct bnx2x *bp)
1827 {
1828         int rc = 0;
1829         if (bp->flags & USING_MSIX_FLAG &&
1830             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1831                 rc = bnx2x_req_msix_irqs(bp);
1832                 if (rc)
1833                         return rc;
1834         } else {
1835                 rc = bnx2x_req_irq(bp);
1836                 if (rc) {
1837                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1838                         return rc;
1839                 }
1840                 if (bp->flags & USING_MSI_FLAG) {
1841                         bp->dev->irq = bp->pdev->irq;
1842                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1843                                     bp->dev->irq);
1844                 }
1845                 if (bp->flags & USING_MSIX_FLAG) {
1846                         bp->dev->irq = bp->msix_table[0].vector;
1847                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1848                                     bp->dev->irq);
1849                 }
1850         }
1851
1852         return 0;
1853 }
1854
1855 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1856 {
1857         int i;
1858
1859         for_each_rx_queue_cnic(bp, i) {
1860                 napi_enable(&bnx2x_fp(bp, i, napi));
1861         }
1862 }
1863
1864 static void bnx2x_napi_enable(struct bnx2x *bp)
1865 {
1866         int i;
1867
1868         for_each_eth_queue(bp, i) {
1869                 napi_enable(&bnx2x_fp(bp, i, napi));
1870         }
1871 }
1872
1873 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1874 {
1875         int i;
1876
1877         for_each_rx_queue_cnic(bp, i) {
1878                 napi_disable(&bnx2x_fp(bp, i, napi));
1879         }
1880 }
1881
1882 static void bnx2x_napi_disable(struct bnx2x *bp)
1883 {
1884         int i;
1885
1886         for_each_eth_queue(bp, i) {
1887                 napi_disable(&bnx2x_fp(bp, i, napi));
1888         }
1889 }
1890
1891 void bnx2x_netif_start(struct bnx2x *bp)
1892 {
1893         if (netif_running(bp->dev)) {
1894                 bnx2x_napi_enable(bp);
1895                 if (CNIC_LOADED(bp))
1896                         bnx2x_napi_enable_cnic(bp);
1897                 bnx2x_int_enable(bp);
1898                 if (bp->state == BNX2X_STATE_OPEN)
1899                         netif_tx_wake_all_queues(bp->dev);
1900         }
1901 }
1902
1903 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1904 {
1905         bnx2x_int_disable_sync(bp, disable_hw);
1906         bnx2x_napi_disable(bp);
1907         if (CNIC_LOADED(bp))
1908                 bnx2x_napi_disable_cnic(bp);
1909 }
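/* Tx queue selection: steer FCoE and FIP frames to the dedicated FCoE ring
 * (when CNIC/FCoE is loaded) and let the stack's fallback hash pick one of
 * the ETH queues for everything else.
 */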
1910
1911 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1912                        void *accel_priv, select_queue_fallback_t fallback)
1913 {
1914         struct bnx2x *bp = netdev_priv(dev);
1915
1916         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1917                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1918                 u16 ether_type = ntohs(hdr->h_proto);
1919
1920                 /* Skip VLAN tag if present */
1921                 if (ether_type == ETH_P_8021Q) {
1922                         struct vlan_ethhdr *vhdr =
1923                                 (struct vlan_ethhdr *)skb->data;
1924
1925                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1926                 }
1927
1928                 /* If ethertype is FCoE or FIP - use FCoE ring */
1929                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1930                         return bnx2x_fcoe_tx(bp, txq_index);
1931         }
1932
1933         /* select a non-FCoE queue */
1934         return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1935 }
1936
1937 void bnx2x_set_num_queues(struct bnx2x *bp)
1938 {
1939         /* RSS queues */
1940         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1941
1942         /* override in STORAGE SD modes */
1943         if (IS_MF_STORAGE_ONLY(bp))
1944                 bp->num_ethernet_queues = 1;
1945
1946         /* Add special queues */
1947         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1948         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1949
1950         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1951 }
1952
1953 /**
1954  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1955  *
1956  * @bp:         Driver handle
1957  *
1958  * We currently support at most 16 Tx queues for each CoS, thus we will
1959  * allocate a multiple of 16 for ETH L2 rings, according to the value of
1960  * bp->max_cos.
1961  *
1962  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1963  * index after all ETH L2 indices.
1964  *
1965  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1966  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1967  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1968  *
1969  * The proper configuration of skb->queue_mapping is handled by
1970  * bnx2x_select_queue() and __skb_tx_hash().
1971  *
1972  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1973  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1974  */
1975 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1976 {
1977         int rc, tx, rx;
1978
1979         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1980         rx = BNX2X_NUM_ETH_QUEUES(bp);
1981
1982 /* account for fcoe queue */
1983         if (include_cnic && !NO_FCOE(bp)) {
1984                 rx++;
1985                 tx++;
1986         }
1987
1988         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1989         if (rc) {
1990                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1991                 return rc;
1992         }
1993         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1994         if (rc) {
1995                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1996                 return rc;
1997         }
1998
1999         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2000                           tx, rx);
2001
2002         return rc;
2003 }
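/* Derive each queue's Rx buffer size from its effective MTU plus the FW
 * alignment and IP header padding overheads. As a rough, non-normative
 * example: with a standard 1500 byte MTU the result (plus NET_SKB_PAD)
 * typically fits in a single page, so rx_frag_size is set and the
 * page-fragment allocator can be used; with a jumbo MTU it does not,
 * rx_frag_size stays 0 and plain kmalloc'ed buffers are used instead
 * (the allocation path is chosen in bnx2x_frag_alloc()).
 */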
2004
2005 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2006 {
2007         int i;
2008
2009         for_each_queue(bp, i) {
2010                 struct bnx2x_fastpath *fp = &bp->fp[i];
2011                 u32 mtu;
2012
2013                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2014                 if (IS_FCOE_IDX(i))
2015                         /*
2016                          * Although no IP frames are expected to arrive on
2017                          * this ring, we still want to add
2018                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2019                          * overrun attack.
2020                          */
2021                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2022                 else
2023                         mtu = bp->dev->mtu;
2024                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2025                                   IP_HEADER_ALIGNMENT_PADDING +
2026                                   ETH_OVREHEAD +
2027                                   mtu +
2028                                   BNX2X_FW_RX_ALIGN_END;
2029                 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2030                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2031                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2032                 else
2033                         fp->rx_frag_size = 0;
2034         }
2035 }
2036
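/* Fill the RSS indirection table with a default spread across the ETH queues
 * (offset by the leading client id) and push the configuration to the device.
 */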
2037 static int bnx2x_init_rss(struct bnx2x *bp)
2038 {
2039         int i;
2040         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2041
2042         /* Prepare the initial contents for the indirection table if RSS is
2043          * enabled
2044          */
2045         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2046                 bp->rss_conf_obj.ind_table[i] =
2047                         bp->fp->cl_id +
2048                         ethtool_rxfh_indir_default(i, num_eth_queues);
2049
2050         /*
2051          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2052          * per-port, so if explicit configuration is needed, do it only
2053          * for a PMF.
2054          *
2055          * For 57712 and newer on the other hand it's a per-function
2056          * configuration.
2057          */
2058         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2059 }
2060
2061 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2062               bool config_hash, bool enable)
2063 {
2064         struct bnx2x_config_rss_params params = {NULL};
2065
2066         /* Although RSS is meaningless when there is a single HW queue, we
2067          * still need it enabled in order to have the HW Rx hash generated.
2068          *
2069          * if (!is_eth_multi(bp))
2070          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2071          */
2072
2073         params.rss_obj = rss_obj;
2074
2075         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2076
2077         if (enable) {
2078                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2079
2080                 /* RSS configuration */
2081                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2082                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2083                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2084                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2085                 if (rss_obj->udp_rss_v4)
2086                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2087                 if (rss_obj->udp_rss_v6)
2088                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2089
2090                 if (!CHIP_IS_E1x(bp)) {
2091                         /* valid only for TUNN_MODE_VXLAN tunnel mode */
2092                         __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2093                         __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2094
2095                         /* valid only for TUNN_MODE_GRE tunnel mode */
2096                         __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2097                 }
2098         } else {
2099                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2100         }
2101
2102         /* Hash bits */
2103         params.rss_result_mask = MULTI_MASK;
2104
2105         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2106
2107         if (config_hash) {
2108                 /* RSS keys */
2109                 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2110                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2111         }
2112
2113         if (IS_PF(bp))
2114                 return bnx2x_config_rss(bp, &params);
2115         else
2116                 return bnx2x_vfpf_config_rss(bp, &params);
2117 }
2118
2119 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2120 {
2121         struct bnx2x_func_state_params func_params = {NULL};
2122
2123         /* Prepare parameters for function state transitions */
2124         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2125
2126         func_params.f_obj = &bp->func_obj;
2127         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2128
2129         func_params.params.hw_init.load_phase = load_code;
2130
2131         return bnx2x_func_state_change(bp, &func_params);
2132 }
2133
2134 /*
2135  * Cleans the objects that have internal lists, without sending
2136  * ramrods. Should be run when interrupts are disabled.
2137  */
2138 void bnx2x_squeeze_objects(struct bnx2x *bp)
2139 {
2140         int rc;
2141         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2142         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2143         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2144
2145         /***************** Cleanup MACs' object first *************************/
2146
2147         /* Wait for completion of the requested commands */
2148         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2149         /* Perform a dry cleanup */
2150         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2151
2152         /* Clean ETH primary MAC */
2153         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2154         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2155                                  &ramrod_flags);
2156         if (rc != 0)
2157                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2158
2159         /* Cleanup UC list */
2160         vlan_mac_flags = 0;
2161         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2162         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2163                                  &ramrod_flags);
2164         if (rc != 0)
2165                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2166
2167         /***************** Now clean mcast object *****************************/
2168         rparam.mcast_obj = &bp->mcast_obj;
2169         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2170
2171         /* Add a DEL command... - Since we're doing a driver cleanup only,
2172          * we take a lock surrounding both the initial send and the CONTs,
2173          * as we don't want a true completion to disrupt us in the middle.
2174          */
2175         netif_addr_lock_bh(bp->dev);
2176         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2177         if (rc < 0)
2178                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2179                           rc);
2180
2181         /* ...and wait until all pending commands are cleared */
2182         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2183         while (rc != 0) {
2184                 if (rc < 0) {
2185                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2186                                   rc);
2187                         netif_addr_unlock_bh(bp->dev);
2188                         return;
2189                 }
2190
2191                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2192         }
2193         netif_addr_unlock_bh(bp->dev);
2194 }
2195
2196 #ifndef BNX2X_STOP_ON_ERROR
2197 #define LOAD_ERROR_EXIT(bp, label) \
2198         do { \
2199                 (bp)->state = BNX2X_STATE_ERROR; \
2200                 goto label; \
2201         } while (0)
2202
2203 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2204         do { \
2205                 bp->cnic_loaded = false; \
2206                 goto label; \
2207         } while (0)
2208 #else /*BNX2X_STOP_ON_ERROR*/
2209 #define LOAD_ERROR_EXIT(bp, label) \
2210         do { \
2211                 (bp)->state = BNX2X_STATE_ERROR; \
2212                 (bp)->panic = 1; \
2213                 return -EBUSY; \
2214         } while (0)
2215 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2216         do { \
2217                 bp->cnic_loaded = false; \
2218                 (bp)->panic = 1; \
2219                 return -EBUSY; \
2220         } while (0)
2221 #endif /*BNX2X_STOP_ON_ERROR*/
2222
2223 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2224 {
2225         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2226                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2227         return;
2228 }
2229
2230 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2231 {
2232         int num_groups, vf_headroom = 0;
2233         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2234
2235         /* number of queues for statistics is number of eth queues + FCoE */
2236         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2237
2238         /* Total number of FW statistics requests =
2239          * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2240          * and fcoe l2 queue) stats + num of queues (which includes another 1
2241          * for fcoe l2 queue if applicable)
2242          */
2243         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2244
2245         /* vf stats appear in the request list, but their data is allocated by
2246          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2247          * it is used to determine where to place the vf stats queries in the
2248          * request struct
2249          */
2250         if (IS_SRIOV(bp))
2251                 vf_headroom = bnx2x_vf_headroom(bp);
2252
2253         /* Request is built from stats_query_header and an array of
2254          * stats_query_cmd_group each of which contains
2255          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2256          * configured in the stats_query_header.
2257          */
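        /* num_groups below is a ceiling division of the total request count
         * by STATS_QUERY_CMD_COUNT; e.g. with a (hypothetical) group size of
         * 16, 17 requests would need 2 groups.
         */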
2258         num_groups =
2259                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2260                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2261                  1 : 0));
2262
2263         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2264            bp->fw_stats_num, vf_headroom, num_groups);
2265         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2266                 num_groups * sizeof(struct stats_query_cmd_group);
2267
2268         /* Data for statistics requests + stats_counter
2269          * stats_counter holds per-STORM counters that are incremented
2270          * when STORM has finished with the current request.
2271          * memory for FCoE offloaded statistics is counted anyway,
2272          * even if they will not be sent.
2273          * VF stats are not accounted for here as the data of VF stats is stored
2274          * in memory allocated by the VF, not here.
2275          */
2276         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2277                 sizeof(struct per_pf_stats) +
2278                 sizeof(struct fcoe_statistics_params) +
2279                 sizeof(struct per_queue_stats) * num_queue_stats +
2280                 sizeof(struct stats_counter);
2281
2282         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2283                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2284         if (!bp->fw_stats)
2285                 goto alloc_mem_err;
2286
2287         /* Set shortcuts */
2288         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2289         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2290         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2291                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2292         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2293                 bp->fw_stats_req_sz;
2294
2295         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2296            U64_HI(bp->fw_stats_req_mapping),
2297            U64_LO(bp->fw_stats_req_mapping));
2298         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2299            U64_HI(bp->fw_stats_data_mapping),
2300            U64_LO(bp->fw_stats_data_mapping));
2301         return 0;
2302
2303 alloc_mem_err:
2304         bnx2x_free_fw_stats_mem(bp);
2305         BNX2X_ERR("Can't allocate FW stats memory\n");
2306         return -ENOMEM;
2307 }
2308
2309 /* send load request to mcp and analyze response */
2310 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2311 {
2312         u32 param;
2313
2314         /* init fw_seq */
2315         bp->fw_seq =
2316                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2317                  DRV_MSG_SEQ_NUMBER_MASK);
2318         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2319
2320         /* Get current FW pulse sequence */
2321         bp->fw_drv_pulse_wr_seq =
2322                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2323                  DRV_PULSE_SEQ_MASK);
2324         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2325
2326         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2327
2328         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2329                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2330
2331         /* load request */
2332         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2333
2334         /* if mcp fails to respond we must abort */
2335         if (!(*load_code)) {
2336                 BNX2X_ERR("MCP response failure, aborting\n");
2337                 return -EBUSY;
2338         }
2339
2340         /* If mcp refused (e.g. other port is in diagnostic mode) we
2341          * must abort
2342          */
2343         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2344                 BNX2X_ERR("MCP refused load request, aborting\n");
2345                 return -EBUSY;
2346         }
2347         return 0;
2348 }
2349
2350 /* check whether another PF has already loaded FW to the chip. In
2351  * virtualized environments a PF from another VM may have already
2352  * initialized the device, including loading FW
2353  */
2354 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2355 {
2356         /* is another pf loaded on this engine? */
2357         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2358             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2359                 /* build my FW version dword */
2360                 /*(DEBLOBBED)*/
2361
2362                 /* read loaded FW from chip */
2363                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2364
2365                 u32 my_fw = ~loaded_fw;
2366
2367                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2368                    loaded_fw, my_fw);
2369
2370                 /* abort nic load if version mismatch */
2371                 if (my_fw != loaded_fw) {
2372                         if (print_err)
2373                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2374                                           loaded_fw, my_fw);
2375                         else
2376                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2377                                                loaded_fw, my_fw);
2378                         return -EBUSY;
2379                 }
2380         }
2381         return 0;
2382 }
2383
2384 /* returns the "mcp load_code" according to global load_count array */
2385 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2386 {
2387         int path = BP_PATH(bp);
2388
2389         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2390            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2391            bnx2x_load_count[path][2]);
2392         bnx2x_load_count[path][0]++;
2393         bnx2x_load_count[path][1 + port]++;
2394         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2395            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2396            bnx2x_load_count[path][2]);
2397         if (bnx2x_load_count[path][0] == 1)
2398                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2399         else if (bnx2x_load_count[path][1 + port] == 1)
2400                 return FW_MSG_CODE_DRV_LOAD_PORT;
2401         else
2402                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2403 }
2404
2405 /* mark PMF if applicable */
2406 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2407 {
2408         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2409             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2410             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2411                 bp->port.pmf = 1;
2412                 /* We need the barrier to ensure the ordering between the
2413                  * writing to bp->port.pmf here and reading it from the
2414                  * bnx2x_periodic_task().
2415                  */
2416                 smp_mb();
2417         } else {
2418                 bp->port.pmf = 0;
2419         }
2420
2421         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2422 }
2423
2424 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2425 {
2426         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2427              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2428             (bp->common.shmem2_base)) {
2429                 if (SHMEM2_HAS(bp, dcc_support))
2430                         SHMEM2_WR(bp, dcc_support,
2431                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2432                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2433                 if (SHMEM2_HAS(bp, afex_driver_support))
2434                         SHMEM2_WR(bp, afex_driver_support,
2435                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2436         }
2437
2438         /* Set AFEX default VLAN tag to an invalid value */
2439         bp->afex_def_vlan_tag = -1;
2440 }
2441
2442 /**
2443  * bnx2x_bz_fp - zero content of the fastpath structure.
2444  *
2445  * @bp:         driver handle
2446  * @index:      fastpath index to be zeroed
2447  *
2448  * Makes sure the contents of bp->fp[index].napi are kept
2449  * intact.
2450  */
2451 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2452 {
2453         struct bnx2x_fastpath *fp = &bp->fp[index];
2454         int cos;
2455         struct napi_struct orig_napi = fp->napi;
2456         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2457
2458         /* bzero bnx2x_fastpath contents */
2459         if (fp->tpa_info)
2460                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2461                        sizeof(struct bnx2x_agg_info));
2462         memset(fp, 0, sizeof(*fp));
2463
2464         /* Restore the NAPI object as it has been already initialized */
2465         fp->napi = orig_napi;
2466         fp->tpa_info = orig_tpa_info;
2467         fp->bp = bp;
2468         fp->index = index;
2469         if (IS_ETH_FP(fp))
2470                 fp->max_cos = bp->max_cos;
2471         else
2472                 /* Special queues support only one CoS */
2473                 fp->max_cos = 1;
2474
2475         /* Init txdata pointers */
2476         if (IS_FCOE_FP(fp))
2477                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2478         if (IS_ETH_FP(fp))
2479                 for_each_cos_in_tx_queue(fp, cos)
2480                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2481                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2482
2483         /* set the tpa flag for each queue. The tpa flag determines the queue
2484          * minimal size so it must be set prior to queue memory allocation
2485          */
2486         if (bp->dev->features & NETIF_F_LRO)
2487                 fp->mode = TPA_MODE_LRO;
2488         else if (bp->dev->features & NETIF_F_GRO &&
2489                  bnx2x_mtu_allows_gro(bp->dev->mtu))
2490                 fp->mode = TPA_MODE_GRO;
2491         else
2492                 fp->mode = TPA_MODE_DISABLED;
2493
2494         /* We don't want TPA if it's disabled in bp
2495          * or if this is an FCoE L2 ring.
2496          */
2497         if (bp->disable_tpa || IS_FCOE_FP(fp))
2498                 fp->mode = TPA_MODE_DISABLED;
2499 }
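/* Report the OS driver state to the management FW through the shmem2
 * os_driver_state field; only meaningful for a PF in MF-BD mode on firmware
 * that exposes this field.
 */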
2500
2501 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2502 {
2503         u32 cur;
2504
2505         if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2506                 return;
2507
2508         cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2509         DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2510            cur, state);
2511
2512         SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2513 }
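/* Bring up the CNIC (storage offload) side of the device: allocate its memory
 * and fastpath resources, add and enable its NAPI contexts, init the HW
 * function for CNIC, enable the timer scan and set up the CNIC queues
 * (PF only), and finally notify the CNIC driver (CNIC_CTL_START_CMD) if the
 * device is already open.
 */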
2514
2515 int bnx2x_load_cnic(struct bnx2x *bp)
2516 {
2517         int i, rc, port = BP_PORT(bp);
2518
2519         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2520
2521         mutex_init(&bp->cnic_mutex);
2522
2523         if (IS_PF(bp)) {
2524                 rc = bnx2x_alloc_mem_cnic(bp);
2525                 if (rc) {
2526                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2527                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2528                 }
2529         }
2530
2531         rc = bnx2x_alloc_fp_mem_cnic(bp);
2532         if (rc) {
2533                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2534                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2535         }
2536
2537         /* Update the number of queues with the cnic queues */
2538         rc = bnx2x_set_real_num_queues(bp, 1);
2539         if (rc) {
2540                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2541                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2542         }
2543
2544         /* Add all CNIC NAPI objects */
2545         bnx2x_add_all_napi_cnic(bp);
2546         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2547         bnx2x_napi_enable_cnic(bp);
2548
2549         rc = bnx2x_init_hw_func_cnic(bp);
2550         if (rc)
2551                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2552
2553         bnx2x_nic_init_cnic(bp);
2554
2555         if (IS_PF(bp)) {
2556                 /* Enable Timer scan */
2557                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2558
2559                 /* setup cnic queues */
2560                 for_each_cnic_queue(bp, i) {
2561                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2562                         if (rc) {
2563                                 BNX2X_ERR("Queue setup failed\n");
2564                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2565                         }
2566                 }
2567         }
2568
2569         /* Initialize Rx filter. */
2570         bnx2x_set_rx_mode_inner(bp);
2571
2572         /* re-read iscsi info */
2573         bnx2x_get_iscsi_info(bp);
2574         bnx2x_setup_cnic_irq_info(bp);
2575         bnx2x_setup_cnic_info(bp);
2576         bp->cnic_loaded = true;
2577         if (bp->state == BNX2X_STATE_OPEN)
2578                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2579
2580         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2581
2582         return 0;
2583
2584 #ifndef BNX2X_STOP_ON_ERROR
2585 load_error_cnic2:
2586         /* Disable Timer scan */
2587         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2588
2589 load_error_cnic1:
2590         bnx2x_napi_disable_cnic(bp);
2591         /* Update the number of queues without the cnic queues */
2592         if (bnx2x_set_real_num_queues(bp, 0))
2593                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2594 load_error_cnic0:
2595         BNX2X_ERR("CNIC-related load failed\n");
2596         bnx2x_free_fp_mem_cnic(bp);
2597         bnx2x_free_mem_cnic(bp);
2598         return rc;
2599 #endif /* ! BNX2X_STOP_ON_ERROR */
2600 }
2601
2602 /* must be called with rtnl_lock */
2603 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2604 {
2605         int port = BP_PORT(bp);
2606         int i, rc = 0, load_code = 0;
2607
2608         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2609         DP(NETIF_MSG_IFUP,
2610            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2611
2612 #ifdef BNX2X_STOP_ON_ERROR
2613         if (unlikely(bp->panic)) {
2614                 BNX2X_ERR("Can't load NIC when there is panic\n");
2615                 return -EPERM;
2616         }
2617 #endif
2618
2619         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2620
2621         /* zero the structure w/o any lock, before SP handler is initialized */
2622         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2623         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2624                 &bp->last_reported_link.link_report_flags);
2625
2626         if (IS_PF(bp))
2627                 /* must be called before memory allocation and HW init */
2628                 bnx2x_ilt_set_info(bp);
2629
2630         /*
2631          * Zero fastpath structures while preserving invariants that are
2632          * allocated only once (napi), plus the fp index, max_cos and bp pointer.
2633          * Also set fp->mode and txdata_ptr.
2634          */
2635         DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2636         for_each_queue(bp, i)
2637                 bnx2x_bz_fp(bp, i);
2638         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2639                                   bp->num_cnic_queues) *
2640                                   sizeof(struct bnx2x_fp_txdata));
2641
2642         bp->fcoe_init = false;
2643
2644         /* Set the receive queues buffer size */
2645         bnx2x_set_rx_buf_size(bp);
2646
2647         if (IS_PF(bp)) {
2648                 rc = bnx2x_alloc_mem(bp);
2649                 if (rc) {
2650                         BNX2X_ERR("Unable to allocate bp memory\n");
2651                         return rc;
2652                 }
2653         }
2654
2655         /* need to be done after alloc mem, since it's self adjusting to amount
2656          * of memory available for RSS queues
2657          */
2658         rc = bnx2x_alloc_fp_mem(bp);
2659         if (rc) {
2660                 BNX2X_ERR("Unable to allocate memory for fps\n");
2661                 LOAD_ERROR_EXIT(bp, load_error0);
2662         }
2663
2664         /* Allocate memory for FW statistics */
2665         if (bnx2x_alloc_fw_stats_mem(bp))
2666                 LOAD_ERROR_EXIT(bp, load_error0);
2667
2668         /* request pf to initialize status blocks */
2669         if (IS_VF(bp)) {
2670                 rc = bnx2x_vfpf_init(bp);
2671                 if (rc)
2672                         LOAD_ERROR_EXIT(bp, load_error0);
2673         }
2674
2675         /* Since bnx2x_alloc_mem() may possibly update
2676          * bp->num_queues, bnx2x_set_real_num_queues() must always
2677          * come after it. At this stage cnic queues are not counted.
2678          */
2679         rc = bnx2x_set_real_num_queues(bp, 0);
2680         if (rc) {
2681                 BNX2X_ERR("Unable to set real_num_queues\n");
2682                 LOAD_ERROR_EXIT(bp, load_error0);
2683         }
2684
2685         /* Configure multi-CoS mappings in the kernel.
2686          * This configuration may be overridden by a multi-class queue
2687          * discipline or by a DCBX negotiation result.
2688          */
2689         bnx2x_setup_tc(bp->dev, bp->max_cos);
2690
2691         /* Add all NAPI objects */
2692         bnx2x_add_all_napi(bp);
2693         DP(NETIF_MSG_IFUP, "napi added\n");
2694         bnx2x_napi_enable(bp);
2695
2696         if (IS_PF(bp)) {
2697                 /* set pf load just before approaching the MCP */
2698                 bnx2x_set_pf_load(bp);
2699
2700                 /* if mcp exists send load request and analyze response */
2701                 if (!BP_NOMCP(bp)) {
2702                         /* attempt to load pf */
2703                         rc = bnx2x_nic_load_request(bp, &load_code);
2704                         if (rc)
2705                                 LOAD_ERROR_EXIT(bp, load_error1);
2706
2707                         /* what did mcp say? */
2708                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2709                         if (rc) {
2710                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2711                                 LOAD_ERROR_EXIT(bp, load_error2);
2712                         }
2713                 } else {
2714                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2715                 }
2716
2717                 /* mark pmf if applicable */
2718                 bnx2x_nic_load_pmf(bp, load_code);
2719
2720                 /* Init Function state controlling object */
2721                 bnx2x__init_func_obj(bp);
2722
2723                 /* Initialize HW */
2724                 rc = bnx2x_init_hw(bp, load_code);
2725                 if (rc) {
2726                         BNX2X_ERR("HW init failed, aborting\n");
2727                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2728                         LOAD_ERROR_EXIT(bp, load_error2);
2729                 }
2730         }
2731
2732         bnx2x_pre_irq_nic_init(bp);
2733
2734         /* Connect to IRQs */
2735         rc = bnx2x_setup_irqs(bp);
2736         if (rc) {
2737                 BNX2X_ERR("setup irqs failed\n");
2738                 if (IS_PF(bp))
2739                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2740                 LOAD_ERROR_EXIT(bp, load_error2);
2741         }
2742
2743         /* Init per-function objects */
2744         if (IS_PF(bp)) {
2745                 /* Setup NIC internals and enable interrupts */
2746                 bnx2x_post_irq_nic_init(bp, load_code);
2747
2748                 bnx2x_init_bp_objs(bp);
2749                 bnx2x_iov_nic_init(bp);
2750
2751                 /* Set AFEX default VLAN tag to an invalid value */
2752                 bp->afex_def_vlan_tag = -1;
2753                 bnx2x_nic_load_afex_dcc(bp, load_code);
2754                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2755                 rc = bnx2x_func_start(bp);
2756                 if (rc) {
2757                         BNX2X_ERR("Function start failed!\n");
2758                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2759
2760                         LOAD_ERROR_EXIT(bp, load_error3);
2761                 }
2762
2763                 /* Send LOAD_DONE command to MCP */
2764                 if (!BP_NOMCP(bp)) {
2765                         load_code = bnx2x_fw_command(bp,
2766                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2767                         if (!load_code) {
2768                                 BNX2X_ERR("MCP response failure, aborting\n");
2769                                 rc = -EBUSY;
2770                                 LOAD_ERROR_EXIT(bp, load_error3);
2771                         }
2772                 }
2773
2774                 /* initialize FW coalescing state machines in RAM */
2775                 bnx2x_update_coalesce(bp);
2776         }
2777
2778         /* setup the leading queue */
2779         rc = bnx2x_setup_leading(bp);
2780         if (rc) {
2781                 BNX2X_ERR("Setup leading failed!\n");
2782                 LOAD_ERROR_EXIT(bp, load_error3);
2783         }
2784
2785         /* set up the rest of the queues */
2786         for_each_nondefault_eth_queue(bp, i) {
2787                 if (IS_PF(bp))
2788                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2789                 else /* VF */
2790                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2791                 if (rc) {
2792                         BNX2X_ERR("Queue %d setup failed\n", i);
2793                         LOAD_ERROR_EXIT(bp, load_error3);
2794                 }
2795         }
2796
2797         /* setup rss */
2798         rc = bnx2x_init_rss(bp);
2799         if (rc) {
2800                 BNX2X_ERR("PF RSS init failed\n");
2801                 LOAD_ERROR_EXIT(bp, load_error3);
2802         }
2803
2804         /* Now when Clients are configured we are ready to work */
2805         bp->state = BNX2X_STATE_OPEN;
2806
2807         /* Configure a ucast MAC */
2808         if (IS_PF(bp))
2809                 rc = bnx2x_set_eth_mac(bp, true);
2810         else /* vf */
2811                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2812                                            true);
2813         if (rc) {
2814                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2815                 LOAD_ERROR_EXIT(bp, load_error3);
2816         }
2817
2818         if (IS_PF(bp) && bp->pending_max) {
2819                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2820                 bp->pending_max = 0;
2821         }
2822
2823         if (bp->port.pmf) {
2824                 rc = bnx2x_initial_phy_init(bp, load_mode);
2825                 if (rc)
2826                         LOAD_ERROR_EXIT(bp, load_error3);
2827         }
2828         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2829
2830         /* Start fast path */
2831
2832         /* Re-configure vlan filters */
2833         rc = bnx2x_vlan_reconfigure_vid(bp);
2834         if (rc)
2835                 LOAD_ERROR_EXIT(bp, load_error3);
2836
2837         /* Initialize Rx filter. */
2838         bnx2x_set_rx_mode_inner(bp);
2839
2840         if (bp->flags & PTP_SUPPORTED) {
2841                 bnx2x_init_ptp(bp);
2842                 bnx2x_configure_ptp_filters(bp);
2843         }
2844         /* Start Tx */
2845         switch (load_mode) {
2846         case LOAD_NORMAL:
2847                 /* Tx queues should only be re-enabled */
2848                 netif_tx_wake_all_queues(bp->dev);
2849                 break;
2850
2851         case LOAD_OPEN:
2852                 netif_tx_start_all_queues(bp->dev);
2853                 smp_mb__after_atomic();
2854                 break;
2855
2856         case LOAD_DIAG:
2857         case LOAD_LOOPBACK_EXT:
2858                 bp->state = BNX2X_STATE_DIAG;
2859                 break;
2860
2861         default:
2862                 break;
2863         }
2864
2865         if (bp->port.pmf)
2866                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2867         else
2868                 bnx2x__link_status_update(bp);
2869
2870         /* start the timer */
2871         mod_timer(&bp->timer, jiffies + bp->current_interval);
2872
2873         if (CNIC_ENABLED(bp))
2874                 bnx2x_load_cnic(bp);
2875
2876         if (IS_PF(bp))
2877                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2878
2879         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2880                 /* mark driver is loaded in shmem2 */
2881                 u32 val;
2882                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2883                 val &= ~DRV_FLAGS_MTU_MASK;
2884                 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2885                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2886                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2887                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2888         }
2889
2890         /* Wait for all pending SP commands to complete */
2891         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2892                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2893                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2894                 return -EBUSY;
2895         }
2896
2897         /* Update driver data for On-Chip MFW dump. */
2898         if (IS_PF(bp))
2899                 bnx2x_update_mfw_dump(bp);
2900
2901         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2902         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2903                 bnx2x_dcbx_init(bp, false);
2904
2905         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2906                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2907
2908         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2909
2910         return 0;
2911
2912 #ifndef BNX2X_STOP_ON_ERROR
2913 load_error3:
2914         if (IS_PF(bp)) {
2915                 bnx2x_int_disable_sync(bp, 1);
2916
2917                 /* Clean queueable objects */
2918                 bnx2x_squeeze_objects(bp);
2919         }
2920
2921         /* Free SKBs, SGEs, TPA pool and driver internals */
2922         bnx2x_free_skbs(bp);
2923         for_each_rx_queue(bp, i)
2924                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2925
2926         /* Release IRQs */
2927         bnx2x_free_irq(bp);
2928 load_error2:
2929         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2930                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2931                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2932         }
2933
2934         bp->port.pmf = 0;
2935 load_error1:
2936         bnx2x_napi_disable(bp);
2937         bnx2x_del_all_napi(bp);
2938
2939         /* clear pf_load status, as it was already set */
2940         if (IS_PF(bp))
2941                 bnx2x_clear_pf_load(bp);
2942 load_error0:
2943         bnx2x_free_fw_stats_mem(bp);
2944         bnx2x_free_fp_mem(bp);
2945         bnx2x_free_mem(bp);
2946
2947         return rc;
2948 #endif /* ! BNX2X_STOP_ON_ERROR */
2949 }
2950
2951 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2952 {
2953         u8 rc = 0, cos, i;
2954
2955         /* Wait until tx fastpath tasks complete */
2956         for_each_tx_queue(bp, i) {
2957                 struct bnx2x_fastpath *fp = &bp->fp[i];
2958
2959                 for_each_cos_in_tx_queue(fp, cos)
2960                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2961                 if (rc)
2962                         return rc;
2963         }
2964         return 0;
2965 }
2966
2967 /* must be called with rtnl_lock */
2968 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2969 {
2970         int i;
2971         bool global = false;
2972
2973         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2974
2975         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2976                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2977
2978         /* mark driver is unloaded in shmem2 */
2979         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2980                 u32 val;
2981                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2982                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2983                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2984         }
2985
2986         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2987             (bp->state == BNX2X_STATE_CLOSED ||
2988              bp->state == BNX2X_STATE_ERROR)) {
2989                 /* We can get here if the driver has been unloaded
2990                  * during parity error recovery and is either waiting for a
2991                  * leader to complete or for other functions to unload and
2992                  * then ifdown has been issued. In this case we want to
2993                  * unload and let other functions complete the recovery
2994                  * process.
2995                  */
2996                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2997                 bp->is_leader = 0;
2998                 bnx2x_release_leader_lock(bp);
2999                 smp_mb();
3000
3001                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3002                 BNX2X_ERR("Can't unload in closed or error state\n");
3003                 return -EINVAL;
3004         }
3005
3006         /* Nothing to do during unload if previous bnx2x_nic_load()
3007          * has not completed successfully - all resources are released.
3008          *
3009          * We can get here only after an unsuccessful ndo_* callback, during
3010          * which the dev->IFF_UP flag is still on.
3011          */
3012         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3013                 return 0;
3014
3015         /* It's important to set bp->state to a value different from
3016          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3017          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3018          */
3019         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3020         smp_mb();
3021
3022         /* indicate to VFs that the PF is going down */
3023         bnx2x_iov_channel_down(bp);
3024
3025         if (CNIC_LOADED(bp))
3026                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3027
3028         /* Stop Tx */
3029         bnx2x_tx_disable(bp);
3030         netdev_reset_tc(bp->dev);
3031
3032         bp->rx_mode = BNX2X_RX_MODE_NONE;
3033
3034         del_timer_sync(&bp->timer);
3035
3036         if (IS_PF(bp)) {
3037                 /* Set ALWAYS_ALIVE bit in shmem */
3038                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3039                 bnx2x_drv_pulse(bp);
3040                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3041                 bnx2x_save_statistics(bp);
3042         }
3043
3044         /* wait till consumers catch up with producers in all queues.
3045          * If we're recovering, FW can't write to host so no reason
3046          * to wait for the queues to complete all Tx.
3047          */
3048         if (unload_mode != UNLOAD_RECOVERY)
3049                 bnx2x_drain_tx_queues(bp);
3050
3051         /* If we're a VF, indicate to the PF that this function is going down
3052          * (the PF will delete SP elements and clear initializations).
3053          */
3054         if (IS_VF(bp))
3055                 bnx2x_vfpf_close_vf(bp);
3056         else if (unload_mode != UNLOAD_RECOVERY)
3057                 /* if this is a normal/close unload, we need to clean up the chip */
3058                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3059         else {
3060                 /* Send the UNLOAD_REQUEST to the MCP */
3061                 bnx2x_send_unload_req(bp, unload_mode);
3062
3063                 /* Prevent transactions to host from the functions on the
3064                  * engine that doesn't reset global blocks in case of global
3065                  * attention once global blocks are reset and gates are opened
3066                  * (the engine whose leader will perform the recovery
3067                  * last).
3068                  */
3069                 if (!CHIP_IS_E1x(bp))
3070                         bnx2x_pf_disable(bp);
3071
3072                 /* Disable HW interrupts, NAPI */
3073                 bnx2x_netif_stop(bp, 1);
3074                 /* Delete all NAPI objects */
3075                 bnx2x_del_all_napi(bp);
3076                 if (CNIC_LOADED(bp))
3077                         bnx2x_del_all_napi_cnic(bp);
3078                 /* Release IRQs */
3079                 bnx2x_free_irq(bp);
3080
3081                 /* Report UNLOAD_DONE to MCP */
3082                 bnx2x_send_unload_done(bp, false);
3083         }
3084
3085         /*
3086          * At this stage no more interrupts will arrive so we may safely clean
3087          * the queueable objects here in case they failed to get cleaned so far.
3088          */
3089         if (IS_PF(bp))
3090                 bnx2x_squeeze_objects(bp);
3091
3092         /* There should be no more pending SP commands at this stage */
3093         bp->sp_state = 0;
3094
3095         bp->port.pmf = 0;
3096
3097         /* clear pending work in rtnl task */
3098         bp->sp_rtnl_state = 0;
3099         smp_mb();
3100
3101         /* Free SKBs, SGEs, TPA pool and driver internals */
3102         bnx2x_free_skbs(bp);
3103         if (CNIC_LOADED(bp))
3104                 bnx2x_free_skbs_cnic(bp);
3105         for_each_rx_queue(bp, i)
3106                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3107
3108         bnx2x_free_fp_mem(bp);
3109         if (CNIC_LOADED(bp))
3110                 bnx2x_free_fp_mem_cnic(bp);
3111
3112         if (IS_PF(bp)) {
3113                 if (CNIC_LOADED(bp))
3114                         bnx2x_free_mem_cnic(bp);
3115         }
3116         bnx2x_free_mem(bp);
3117
3118         bp->state = BNX2X_STATE_CLOSED;
3119         bp->cnic_loaded = false;
3120
3121         /* Clear driver version indication in shmem */
3122         if (IS_PF(bp))
3123                 bnx2x_update_mng_version(bp);
3124
3125         /* Check if there are pending parity attentions. If there are - set
3126          * RECOVERY_IN_PROGRESS.
3127          */
3128         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3129                 bnx2x_set_reset_in_progress(bp);
3130
3131                 /* Set RESET_IS_GLOBAL if needed */
3132                 if (global)
3133                         bnx2x_set_reset_global(bp);
3134         }
3135
3136         /* The last driver must disable the "close the gate" functionality if
3137          * there is no parity attention or "process kill" pending.
3138          */
3139         if (IS_PF(bp) &&
3140             !bnx2x_clear_pf_load(bp) &&
3141             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3142                 bnx2x_disable_close_the_gate(bp);
3143
3144         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3145
3146         return 0;
3147 }
3148
3149 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3150 {
3151         u16 pmcsr;
3152
3153         /* If there is no power capability, silently succeed */
3154         if (!bp->pdev->pm_cap) {
3155                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3156                 return 0;
3157         }
3158
3159         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3160
3161         switch (state) {
3162         case PCI_D0:
3163                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3164                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3165                                        PCI_PM_CTRL_PME_STATUS));
3166
3167                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3168                         /* delay required during transition out of D3hot */
3169                         msleep(20);
3170                 break;
3171
3172         case PCI_D3hot:
3173                 /* If there are other clients above, don't
3174                  * shut down the power */
3175                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3176                         return 0;
3177                 /* Don't shut down the power for emulation and FPGA */
3178                 if (CHIP_REV_IS_SLOW(bp))
3179                         return 0;
3180
3181                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
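                     /* 3 == D3hot in the PM control/status power-state field */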
3182                 pmcsr |= 3;
3183
3184                 if (bp->wol)
3185                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3186
3187                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3188                                       pmcsr);
3189
3190                 /* No more memory access after this point until
3191                  * device is brought back to D0.
3192                  */
3193                 break;
3194
3195         default:
3196                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3197                 return -EINVAL;
3198         }
3199         return 0;
3200 }
3201
3202 /*
3203  * net_device service functions
3204  */
3205 static int bnx2x_poll(struct napi_struct *napi, int budget)
3206 {
3207         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3208                                                  napi);
3209         struct bnx2x *bp = fp->bp;
3210         int rx_work_done;
3211         u8 cos;
3212
3213 #ifdef BNX2X_STOP_ON_ERROR
3214         if (unlikely(bp->panic)) {
3215                 napi_complete(napi);
3216                 return 0;
3217         }
3218 #endif
3219         for_each_cos_in_tx_queue(fp, cos)
3220                 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3221                         bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3222
3223         rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3224
3225         if (rx_work_done < budget) {
3226                 /* No need to update SB for FCoE L2 ring as long as
3227                  * it's connected to the default SB and the SB
3228                  * has been updated when NAPI was scheduled.
3229                  */
3230                 if (IS_FCOE_FP(fp)) {
3231                         napi_complete(napi);
3232                 } else {
3233                         bnx2x_update_fpsb_idx(fp);
3234                         /* bnx2x_has_rx_work() reads the status block,
3235                          * thus we need to ensure that status block indices
3236                          * have been actually read (bnx2x_update_fpsb_idx)
3237                          * prior to this check (bnx2x_has_rx_work) so that
3238                          * we won't write the "newer" value of the status block
3239                          * to IGU (if there was a DMA right after
3240                          * bnx2x_has_rx_work and if there is no rmb, the memory
3241                          * reading (bnx2x_update_fpsb_idx) may be postponed
3242                          * to right before bnx2x_ack_sb). In this case there
3243                          * will never be another interrupt until there is
3244                          * another update of the status block, while there
3245                          * is still unhandled work.
3246                          */
3247                         rmb();
3248
3249                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3250                                 napi_complete(napi);
3251                                 /* Re-enable interrupts */
3252                                 DP(NETIF_MSG_RX_STATUS,
3253                                    "Update index to %d\n", fp->fp_hc_idx);
3254                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3255                                              le16_to_cpu(fp->fp_hc_idx),
3256                                              IGU_INT_ENABLE, 1);
3257                         } else {
3258                                 rx_work_done = budget;
3259                         }
3260                 }
3261         }
3262
3263         return rx_work_done;
3264 }
3265
3266 /* We split the first BD into header and data BDs
3267  * to ease the pain of our fellow microcode engineers;
3268  * we use one mapping for both BDs.
3269  */
3270 static u16 bnx2x_tx_split(struct bnx2x *bp,
3271                           struct bnx2x_fp_txdata *txdata,
3272                           struct sw_tx_bd *tx_buf,
3273                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3274                           u16 bd_prod)
3275 {
3276         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3277         struct eth_tx_bd *d_tx_bd;
3278         dma_addr_t mapping;
3279         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3280
3281         /* first fix first BD */
3282         h_tx_bd->nbytes = cpu_to_le16(hlen);
3283
3284         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3285            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3286
3287         /* now get a new data BD
3288          * (after the pbd) and fill it */
3289         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3290         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3291
3292         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3293                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3294
3295         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3296         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3297         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3298
3299         /* this marks the BD as one that has no individual mapping */
3300         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3301
3302         DP(NETIF_MSG_TX_QUEUED,
3303            "TSO split data size is %d (%x:%x)\n",
3304            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3305
3306         /* update tx_bd */
3307         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3308
3309         return bd_prod;
3310 }
3311
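     /* Store a byte-swapped value in a little-endian descriptor field */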
3312 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3313 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
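     /* Adjust a checksum whose coverage starts 'fix' bytes away from the transport
      * header: subtract the checksum of the extra leading bytes (fix > 0) or add
      * the checksum of the missing bytes (fix < 0); the result is byte-swapped.
      */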
3314 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3315 {
3316         __sum16 tsum = (__force __sum16) csum;
3317
3318         if (fix > 0)
3319                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3320                                   csum_partial(t_header - fix, fix, 0)));
3321
3322         else if (fix < 0)
3323                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3324                                   csum_partial(t_header, -fix, 0)));
3325
3326         return bswab16(tsum);
3327 }
3328
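     /**
      * bnx2x_xmit_type - classify an skb for transmission.
      *
      * @bp:         driver handle
      * @skb:        packet skb
      *
      * Returns XMIT_* flags (plain, checksum v4/v6, TCP checksum, GSO, tunnel
      * encapsulation) that drive how the Tx BDs are built.
      */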
3329 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3330 {
3331         u32 rc;
3332         __u8 prot = 0;
3333         __be16 protocol;
3334
3335         if (skb->ip_summed != CHECKSUM_PARTIAL)
3336                 return XMIT_PLAIN;
3337
3338         protocol = vlan_get_protocol(skb);
3339         if (protocol == htons(ETH_P_IPV6)) {
3340                 rc = XMIT_CSUM_V6;
3341                 prot = ipv6_hdr(skb)->nexthdr;
3342         } else {
3343                 rc = XMIT_CSUM_V4;
3344                 prot = ip_hdr(skb)->protocol;
3345         }
3346
3347         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3348                 if (inner_ip_hdr(skb)->version == 6) {
3349                         rc |= XMIT_CSUM_ENC_V6;
3350                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3351                                 rc |= XMIT_CSUM_TCP;
3352                 } else {
3353                         rc |= XMIT_CSUM_ENC_V4;
3354                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3355                                 rc |= XMIT_CSUM_TCP;
3356                 }
3357         }
3358         if (prot == IPPROTO_TCP)
3359                 rc |= XMIT_CSUM_TCP;
3360
3361         if (skb_is_gso(skb)) {
3362                 if (skb_is_gso_v6(skb)) {
3363                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3364                         if (rc & XMIT_CSUM_ENC)
3365                                 rc |= XMIT_GSO_ENC_V6;
3366                 } else {
3367                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3368                         if (rc & XMIT_CSUM_ENC)
3369                                 rc |= XMIT_GSO_ENC_V4;
3370                 }
3371         }
3372
3373         return rc;
3374 }
3375
3376 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3377 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3378
3379 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3380 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3381
3382 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3383 /* Check if the packet requires linearization (the packet is too fragmented).
3384  * No need to check fragmentation if page size > 8K (there will be no
3385  * violation of FW restrictions). */
3386 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3387                              u32 xmit_type)
3388 {
3389         int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3390         int to_copy = 0, hlen = 0;
3391
3392         if (xmit_type & XMIT_GSO_ENC)
3393                 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3394
3395         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3396                 if (xmit_type & XMIT_GSO) {
3397                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3398                         int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3399                         /* Number of windows to check */
3400                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3401                         int wnd_idx = 0;
3402                         int frag_idx = 0;
3403                         u32 wnd_sum = 0;
3404
3405                         /* Headers length */
3406                         if (xmit_type & XMIT_GSO_ENC)
3407                                 hlen = (int)(skb_inner_transport_header(skb) -
3408                                              skb->data) +
3409                                              inner_tcp_hdrlen(skb);
3410                         else
3411                                 hlen = (int)(skb_transport_header(skb) -
3412                                              skb->data) + tcp_hdrlen(skb);
3413
3414                         /* Amount of data (w/o headers) on the linear part of the SKB */
3415                         first_bd_sz = skb_headlen(skb) - hlen;
3416
3417                         wnd_sum  = first_bd_sz;
3418
3419                         /* Calculate the first sum - it's special */
3420                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3421                                 wnd_sum +=
3422                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3423
3424                         /* If there was data in the linear part of the skb - check it */
3425                         if (first_bd_sz > 0) {
3426                                 if (unlikely(wnd_sum < lso_mss)) {
3427                                         to_copy = 1;
3428                                         goto exit_lbl;
3429                                 }
3430
3431                                 wnd_sum -= first_bd_sz;
3432                         }
3433
3434                         /* Others are easier: run through the frag list and
3435                            check all windows */
3436                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3437                                 wnd_sum +=
3438                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3439
3440                                 if (unlikely(wnd_sum < lso_mss)) {
3441                                         to_copy = 1;
3442                                         break;
3443                                 }
3444                                 wnd_sum -=
3445                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3446                         }
3447                 } else {
3448                         /* In the non-LSO case, a too-fragmented packet
3449                          * should always be linearized */
3450                         to_copy = 1;
3451                 }
3452         }
3453
3454 exit_lbl:
3455         if (unlikely(to_copy))
3456                 DP(NETIF_MSG_TX_QUEUED,
3457                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3458                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3459                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3460
3461         return to_copy;
3462 }
3463 #endif
3464
3465 /**
3466  * bnx2x_set_pbd_gso - update PBD in GSO case.
3467  *
3468  * @skb:        packet skb
3469  * @pbd:        parse BD
3470  * @xmit_type:  xmit flags
3471  */
3472 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3473                               struct eth_tx_parse_bd_e1x *pbd,
3474                               u32 xmit_type)
3475 {
3476         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3477         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3478         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3479
3480         if (xmit_type & XMIT_GSO_V4) {
3481                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3482                 pbd->tcp_pseudo_csum =
3483                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3484                                                    ip_hdr(skb)->daddr,
3485                                                    0, IPPROTO_TCP, 0));
3486         } else {
3487                 pbd->tcp_pseudo_csum =
3488                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3489                                                  &ipv6_hdr(skb)->daddr,
3490                                                  0, IPPROTO_TCP, 0));
3491         }
3492
3493         pbd->global_data |=
3494                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3495 }
3496
3497 /**
3498  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3499  *
3500  * @bp:                 driver handle
3501  * @skb:                packet skb
3502  * @parsing_data:       data to be updated
3503  * @xmit_type:          xmit flags
3504  *
3505  * 57712/578xx related, when skb has encapsulation
3506  */
3507 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3508                                  u32 *parsing_data, u32 xmit_type)
3509 {
3510         *parsing_data |=
3511                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3512                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3513                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3514
3515         if (xmit_type & XMIT_CSUM_TCP) {
3516                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3517                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3518                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3519
3520                 return skb_inner_transport_header(skb) +
3521                         inner_tcp_hdrlen(skb) - skb->data;
3522         }
3523
3524         /* We support checksum offload for TCP and UDP only.
3525          * No need to pass the UDP header length - it's a constant.
3526          */
3527         return skb_inner_transport_header(skb) +
3528                 sizeof(struct udphdr) - skb->data;
3529 }
3530
3531 /**
3532  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3533  *
3534  * @bp:                 driver handle
3535  * @skb:                packet skb
3536  * @parsing_data:       data to be updated
3537  * @xmit_type:          xmit flags
3538  *
3539  * 57712/578xx related
3540  */
3541 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3542                                 u32 *parsing_data, u32 xmit_type)
3543 {
3544         *parsing_data |=
3545                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3546                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3547                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3548
3549         if (xmit_type & XMIT_CSUM_TCP) {
3550                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3551                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3552                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3553
3554                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3555         }
3556         /* We support checksum offload for TCP and UDP only.
3557          * No need to pass the UDP header length - it's a constant.
3558          */
3559         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3560 }
3561
3562 /* set FW indication according to inner or outer protocols if tunneled */
3563 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3564                                struct eth_tx_start_bd *tx_start_bd,
3565                                u32 xmit_type)
3566 {
3567         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3568
3569         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3570                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3571
3572         if (!(xmit_type & XMIT_CSUM_TCP))
3573                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3574 }
3575
3576 /**
3577  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3578  *
3579  * @bp:         driver handle
3580  * @skb:        packet skb
3581  * @pbd:        parse BD to be updated
3582  * @xmit_type:  xmit flags
3583  */
3584 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3585                              struct eth_tx_parse_bd_e1x *pbd,
3586                              u32 xmit_type)
3587 {
3588         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3589
3590         /* for now NS flag is not used in Linux */
3591         pbd->global_data =
3592                 cpu_to_le16(hlen |
3593                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3594                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3595
3596         pbd->ip_hlen_w = (skb_transport_header(skb) -
3597                         skb_network_header(skb)) >> 1;
3598
3599         hlen += pbd->ip_hlen_w;
3600
3601         /* We support checksum offload for TCP and UDP only */
3602         if (xmit_type & XMIT_CSUM_TCP)
3603                 hlen += tcp_hdrlen(skb) / 2;
3604         else
3605                 hlen += sizeof(struct udphdr) / 2;
3606
3607         pbd->total_hlen_w = cpu_to_le16(hlen);
3608         hlen = hlen * 2;
3609
3610         if (xmit_type & XMIT_CSUM_TCP) {
3611                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3612
3613         } else {
3614                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3615
3616                 DP(NETIF_MSG_TX_QUEUED,
3617                    "hlen %d  fix %d  csum before fix %x\n",
3618                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3619
3620                 /* HW bug: fixup the CSUM */
3621                 pbd->tcp_pseudo_csum =
3622                         bnx2x_csum_fix(skb_transport_header(skb),
3623                                        SKB_CS(skb), fix);
3624
3625                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3626                    pbd->tcp_pseudo_csum);
3627         }
3628
3629         return hlen;
3630 }
3631
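     /* Fill the E2 parsing BDs for a tunneled GSO packet: FW header lengths,
      * outer IP checksum info, inner pseudo checksum, TCP sequence and flags.
      */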
3632 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3633                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3634                                       struct eth_tx_parse_2nd_bd *pbd2,
3635                                       u16 *global_data,
3636                                       u32 xmit_type)
3637 {
3638         u16 hlen_w = 0;
3639         u8 outerip_off, outerip_len = 0;
3640
3641         /* from outer IP to transport */
3642         hlen_w = (skb_inner_transport_header(skb) -
3643                   skb_network_header(skb)) >> 1;
3644
3645         /* transport len */
3646         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3647
3648         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3649
3650         /* outer IP header info */
3651         if (xmit_type & XMIT_CSUM_V4) {
3652                 struct iphdr *iph = ip_hdr(skb);
3653                 u32 csum = (__force u32)(~iph->check) -
3654                            (__force u32)iph->tot_len -
3655                            (__force u32)iph->frag_off;
3656
3657                 outerip_len = iph->ihl << 1;
3658
3659                 pbd2->fw_ip_csum_wo_len_flags_frag =
3660                         bswab16(csum_fold((__force __wsum)csum));
3661         } else {
3662                 pbd2->fw_ip_hdr_to_payload_w =
3663                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3664                 pbd_e2->data.tunnel_data.flags |=
3665                         ETH_TUNNEL_DATA_IPV6_OUTER;
3666         }
3667
3668         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3669
3670         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3671
3672         /* inner IP header info */
3673         if (xmit_type & XMIT_CSUM_ENC_V4) {
3674                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3675
3676                 pbd_e2->data.tunnel_data.pseudo_csum =
3677                         bswab16(~csum_tcpudp_magic(
3678                                         inner_ip_hdr(skb)->saddr,
3679                                         inner_ip_hdr(skb)->daddr,
3680                                         0, IPPROTO_TCP, 0));
3681         } else {
3682                 pbd_e2->data.tunnel_data.pseudo_csum =
3683                         bswab16(~csum_ipv6_magic(
3684                                         &inner_ipv6_hdr(skb)->saddr,
3685                                         &inner_ipv6_hdr(skb)->daddr,
3686                                         0, IPPROTO_TCP, 0));
3687         }
3688
3689         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3690
3691         *global_data |=
3692                 outerip_off |
3693                 (outerip_len <<
3694                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3695                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3696                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3697
3698         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3699                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3700                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3701         }
3702 }
3703
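     /* Set the IPv6-with-extension-header indication for the FW when the
      * (inner) IPv6 next header is NEXTHDR_IPV6.
      */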
3704 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3705                                          u32 xmit_type)
3706 {
3707         struct ipv6hdr *ipv6;
3708
3709         if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3710                 return;
3711
3712         if (xmit_type & XMIT_GSO_ENC_V6)
3713                 ipv6 = inner_ipv6_hdr(skb);
3714         else /* XMIT_GSO_V6 */
3715                 ipv6 = ipv6_hdr(skb);
3716
3717         if (ipv6->nexthdr == NEXTHDR_IPV6)
3718                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3719 }
3720
3721 /* called with netif_tx_lock
3722  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3723  * netif_wake_queue()
3724  */
3725 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3726 {
3727         struct bnx2x *bp = netdev_priv(dev);
3728
3729         struct netdev_queue *txq;
3730         struct bnx2x_fp_txdata *txdata;
3731         struct sw_tx_bd *tx_buf;
3732         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3733         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3734         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3735         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3736         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3737         u32 pbd_e2_parsing_data = 0;
3738         u16 pkt_prod, bd_prod;
3739         int nbd, txq_index;
3740         dma_addr_t mapping;
3741         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3742         int i;
3743         u8 hlen = 0;
3744         __le16 pkt_size = 0;
3745         struct ethhdr *eth;
3746         u8 mac_type = UNICAST_ADDRESS;
3747
3748 #ifdef BNX2X_STOP_ON_ERROR
3749         if (unlikely(bp->panic))
3750                 return NETDEV_TX_BUSY;
3751 #endif
3752
3753         txq_index = skb_get_queue_mapping(skb);
3754         txq = netdev_get_tx_queue(dev, txq_index);
3755
3756         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3757
3758         txdata = &bp->bnx2x_txq[txq_index];
3759
3760         /* enable this debug print to view the transmission queue being used
3761         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3762            txq_index, fp_index, txdata_index); */
3763
3764         /* enable this debug print to view the transmission details
3765         DP(NETIF_MSG_TX_QUEUED,
3766            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3767            txdata->cid, fp_index, txdata_index, txdata, fp); */
3768
3769         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3770                         skb_shinfo(skb)->nr_frags +
3771                         BDS_PER_TX_PKT +
3772                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3773                 /* Handle special storage cases separately */
3774                 if (txdata->tx_ring_size == 0) {
3775                         struct bnx2x_eth_q_stats *q_stats =
3776                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3777                         q_stats->driver_filtered_tx_pkt++;
3778                         dev_kfree_skb(skb);
3779                         return NETDEV_TX_OK;
3780                 }
3781                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3782                 netif_tx_stop_queue(txq);
3783                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3784
3785                 return NETDEV_TX_BUSY;
3786         }
3787
3788         DP(NETIF_MSG_TX_QUEUED,
3789            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3790            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3791            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3792            skb->len);
3793
3794         eth = (struct ethhdr *)skb->data;
3795
3796         /* set flag according to packet type (UNICAST_ADDRESS is default) */
3797         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3798                 if (is_broadcast_ether_addr(eth->h_dest))
3799                         mac_type = BROADCAST_ADDRESS;
3800                 else
3801                         mac_type = MULTICAST_ADDRESS;
3802         }
3803
3804 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3805         /* First, check if we need to linearize the skb (due to FW
3806            restrictions). No need to check fragmentation if page size > 8K
3807            (there will be no violation of FW restrictions) */
3808         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3809                 /* Statistics of linearization */
3810                 bp->lin_cnt++;
3811                 if (skb_linearize(skb) != 0) {
3812                         DP(NETIF_MSG_TX_QUEUED,
3813                            "SKB linearization failed - silently dropping this SKB\n");
3814                         dev_kfree_skb_any(skb);
3815                         return NETDEV_TX_OK;
3816                 }
3817         }
3818 #endif
3819         /* Map skb linear data for DMA */
3820         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3821                                  skb_headlen(skb), DMA_TO_DEVICE);
3822         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3823                 DP(NETIF_MSG_TX_QUEUED,
3824                    "SKB mapping failed - silently dropping this SKB\n");
3825                 dev_kfree_skb_any(skb);
3826                 return NETDEV_TX_OK;
3827         }
3828         /*
3829          * Please read carefully. First we use one BD which we mark as start,
3830          * then we have a parsing info BD (used for TSO or xsum),
3831          * and only then we have the rest of the TSO BDs.
3832          * (don't forget to mark the last one as last,
3833          * and to unmap only AFTER you write to the BD ...)
3834          * And above all, all PBD sizes are in words - NOT DWORDS!
3835          */
3836
3837         /* get current pkt produced now - advance it just before sending packet
3838          * since mapping of pages may fail and cause packet to be dropped
3839          */
3840         pkt_prod = txdata->tx_pkt_prod;
3841         bd_prod = TX_BD(txdata->tx_bd_prod);
3842
3843         /* get a tx_buf and first BD
3844          * tx_start_bd may be changed during SPLIT,
3845          * but first_bd will always stay first
3846          */
3847         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3848         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3849         first_bd = tx_start_bd;
3850
3851         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3852
3853         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3854                 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3855                         BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3856                 } else if (bp->ptp_tx_skb) {
3857                         BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3858                 } else {
3859                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3860                         /* schedule check for Tx timestamp */
3861                         bp->ptp_tx_skb = skb_get(skb);
3862                         bp->ptp_tx_start = jiffies;
3863                         schedule_work(&bp->ptp_task);
3864                 }
3865         }
3866
3867         /* header nbd: indirectly zero other flags! */
3868         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3869
3870         /* remember the first BD of the packet */
3871         tx_buf->first_bd = txdata->tx_bd_prod;
3872         tx_buf->skb = skb;
3873         tx_buf->flags = 0;
3874
3875         DP(NETIF_MSG_TX_QUEUED,
3876            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3877            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3878
3879         if (skb_vlan_tag_present(skb)) {
3880                 tx_start_bd->vlan_or_ethertype =
3881                     cpu_to_le16(skb_vlan_tag_get(skb));
3882                 tx_start_bd->bd_flags.as_bitfield |=
3883                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3884         } else {
3885                 /* When transmitting from a VF, the start BD must hold the
3886                  * ethertype for FW to enforce it
3887                  */
3888 #ifndef BNX2X_STOP_ON_ERROR
3889                 if (IS_VF(bp))
3890 #endif
3891                         tx_start_bd->vlan_or_ethertype =
3892                                 cpu_to_le16(ntohs(eth->h_proto));
3893 #ifndef BNX2X_STOP_ON_ERROR
3894                 else
3895                         /* used by FW for packet accounting */
3896                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3897 #endif
3898         }
3899
3900         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3901
3902         /* turn on parsing and get a BD */
3903         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3904
3905         if (xmit_type & XMIT_CSUM)
3906                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3907
3908         if (!CHIP_IS_E1x(bp)) {
3909                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3910                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3911
3912                 if (xmit_type & XMIT_CSUM_ENC) {
3913                         u16 global_data = 0;
3914
3915                         /* Set PBD in enc checksum offload case */
3916                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3917                                                       &pbd_e2_parsing_data,
3918                                                       xmit_type);
3919
3920                         /* turn on 2nd parsing and get a BD */
3921                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3922
3923                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3924
3925                         memset(pbd2, 0, sizeof(*pbd2));
3926
3927                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3928                                 (skb_inner_network_header(skb) -
3929                                  skb->data) >> 1;
3930
3931                         if (xmit_type & XMIT_GSO_ENC)
3932                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3933                                                           &global_data,
3934                                                           xmit_type);
3935
3936                         pbd2->global_data = cpu_to_le16(global_data);
3937
3938                         /* add an additional parsing BD indication to the start BD */
3939                         SET_FLAG(tx_start_bd->general_data,
3940                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3941                         /* set encapsulation flag in start BD */
3942                         SET_FLAG(tx_start_bd->general_data,
3943                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3944
3945                         tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3946
3947                         nbd++;
3948                 } else if (xmit_type & XMIT_CSUM) {
3949                         /* Set PBD in checksum offload case w/o encapsulation */
3950                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3951                                                      &pbd_e2_parsing_data,
3952                                                      xmit_type);
3953                 }
3954
3955                 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3956                 /* Add the macs to the parsing BD if this is a vf or if
3957                  * Tx Switching is enabled.
3958                  */
3959                 if (IS_VF(bp)) {
3960                         /* override GRE parameters in BD */
3961                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3962                                               &pbd_e2->data.mac_addr.src_mid,
3963                                               &pbd_e2->data.mac_addr.src_lo,
3964                                               eth->h_source);
3965
3966                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3967                                               &pbd_e2->data.mac_addr.dst_mid,
3968                                               &pbd_e2->data.mac_addr.dst_lo,
3969                                               eth->h_dest);
3970                 } else {
3971                         if (bp->flags & TX_SWITCHING)
3972                                 bnx2x_set_fw_mac_addr(
3973                                                 &pbd_e2->data.mac_addr.dst_hi,
3974                                                 &pbd_e2->data.mac_addr.dst_mid,
3975                                                 &pbd_e2->data.mac_addr.dst_lo,
3976                                                 eth->h_dest);
3977 #ifdef BNX2X_STOP_ON_ERROR
3978                         /* Enforce security is always set in Stop on Error -
3979                          * source mac should be present in the parsing BD
3980                          */
3981                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3982                                               &pbd_e2->data.mac_addr.src_mid,
3983                                               &pbd_e2->data.mac_addr.src_lo,
3984                                               eth->h_source);
3985 #endif
3986                 }
3987
3988                 SET_FLAG(pbd_e2_parsing_data,
3989                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3990         } else {
3991                 u16 global_data = 0;
3992                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3993                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3994                 /* Set PBD in checksum offload case */
3995                 if (xmit_type & XMIT_CSUM)
3996                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3997
3998                 SET_FLAG(global_data,
3999                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4000                 pbd_e1x->global_data |= cpu_to_le16(global_data);
4001         }
4002
4003         /* Setup the data pointer of the first BD of the packet */
4004         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4005         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4006         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4007         pkt_size = tx_start_bd->nbytes;
4008
4009         DP(NETIF_MSG_TX_QUEUED,
4010            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4011            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4012            le16_to_cpu(tx_start_bd->nbytes),
4013            tx_start_bd->bd_flags.as_bitfield,
4014            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4015
4016         if (xmit_type & XMIT_GSO) {
4017
4018                 DP(NETIF_MSG_TX_QUEUED,
4019                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4020                    skb->len, hlen, skb_headlen(skb),
4021                    skb_shinfo(skb)->gso_size);
4022
4023                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4024
4025                 if (unlikely(skb_headlen(skb) > hlen)) {
4026                         nbd++;
4027                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4028                                                  &tx_start_bd, hlen,
4029                                                  bd_prod);
4030                 }
4031                 if (!CHIP_IS_E1x(bp))
4032                         pbd_e2_parsing_data |=
4033                                 (skb_shinfo(skb)->gso_size <<
4034                                  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4035                                  ETH_TX_PARSE_BD_E2_LSO_MSS;
4036                 else
4037                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4038         }
4039
4040         /* Set the PBD's parsing_data field if not zero
4041          * (for the chips newer than 57711).
4042          */
4043         if (pbd_e2_parsing_data)
4044                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4045
4046         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4047
4048         /* Handle fragmented skb */
4049         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4050                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4051
4052                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4053                                            skb_frag_size(frag), DMA_TO_DEVICE);
4054                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4055                         unsigned int pkts_compl = 0, bytes_compl = 0;
4056
4057                         DP(NETIF_MSG_TX_QUEUED,
4058                            "Unable to map page - dropping packet...\n");
4059
4060                         /* we need to unmap all buffers already mapped
4061                          * for this SKB;
4062                          * first_bd->nbd needs to be properly updated
4063                          * before the call to bnx2x_free_tx_pkt
4064                          */
4065                         first_bd->nbd = cpu_to_le16(nbd);
4066                         bnx2x_free_tx_pkt(bp, txdata,
4067                                           TX_BD(txdata->tx_pkt_prod),
4068                                           &pkts_compl, &bytes_compl);
4069                         return NETDEV_TX_OK;
4070                 }
4071
4072                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4073                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4074                 if (total_pkt_bd == NULL)
4075                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4076
4077                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4078                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4079                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4080                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4081                 nbd++;
4082
4083                 DP(NETIF_MSG_TX_QUEUED,
4084                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4085                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4086                    le16_to_cpu(tx_data_bd->nbytes));
4087         }
4088
4089         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4090
4091         /* update with actual num BDs */
4092         first_bd->nbd = cpu_to_le16(nbd);
4093
4094         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4095
4096         /* now send a tx doorbell, counting the next BD
4097          * if the packet contains or ends with it
4098          */
4099         if (TX_BD_POFF(bd_prod) < nbd)
4100                 nbd++;
4101
4102         /* total_pkt_bytes should be set on the first data BD if
4103          * it's not an LSO packet and there is more than one
4104          * data BD. In this case pkt_size is limited by an MTU value.
4105          * However we prefer to set it for an LSO packet (while we don't
4106          * have to) in order to save some CPU cycles in the non-LSO
4107          * case, where we care much more about them.
4108          */
4109         if (total_pkt_bd != NULL)
4110                 total_pkt_bd->total_pkt_bytes = pkt_size;
4111
4112         if (pbd_e1x)
4113                 DP(NETIF_MSG_TX_QUEUED,
4114                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4115                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4116                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4117                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4118                    le16_to_cpu(pbd_e1x->total_hlen_w));
4119         if (pbd_e2)
4120                 DP(NETIF_MSG_TX_QUEUED,
4121                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4122                    pbd_e2,
4123                    pbd_e2->data.mac_addr.dst_hi,
4124                    pbd_e2->data.mac_addr.dst_mid,
4125                    pbd_e2->data.mac_addr.dst_lo,
4126                    pbd_e2->data.mac_addr.src_hi,
4127                    pbd_e2->data.mac_addr.src_mid,
4128                    pbd_e2->data.mac_addr.src_lo,
4129                    pbd_e2->parsing_data);
4130         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4131
4132         netdev_tx_sent_queue(txq, skb->len);
4133
4134         skb_tx_timestamp(skb);
4135
4136         txdata->tx_pkt_prod++;
4137         /*
4138          * Make sure that the BD data is updated before updating the producer
4139          * since FW might read the BD right after the producer is updated.
4140          * This is only applicable for weak-ordered memory model archs such
4141          * as IA-64. The following barrier is also mandatory since FW
4142          * assumes packets always have BDs.
4143          */
4144         wmb();
4145
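             /* The barrier() after the producer update below is a compiler
              * barrier: it ensures the tx_db.data.prod store has actually been
              * performed before tx_db.raw is read for the DOORBELL() register
              * write that publishes it to the chip.
              */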
4146         txdata->tx_db.data.prod += nbd;
4147         barrier();
4148
4149         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4150
4151         mmiowb();
4152
4153         txdata->tx_bd_prod += nbd;
4154
4155         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4156                 netif_tx_stop_queue(txq);
4157
4158                 /* The paired memory barrier is in bnx2x_tx_int(); we have to keep
4159                  * the ordering of the set_bit() in netif_tx_stop_queue() and the
4160                  * read of fp->bd_tx_cons. */
4161                 smp_mb();
4162
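                     /* Re-check availability: bnx2x_tx_int() may have freed BDs
                      * after we saw the ring as full, in which case the queue is
                      * woken right back up below.
                      */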
4163                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4164                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4165                         netif_tx_wake_queue(txq);
4166         }
4167         txdata->tx_pkt++;
4168
4169         return NETDEV_TX_OK;
4170 }
4171
4172 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4173 {
4174         int mfw_vn = BP_FW_MB_IDX(bp);
4175         u32 tmp;
4176
4177         /* If the shmem shouldn't affect configuration, use an identity mapping */
4178         if (!IS_MF_BD(bp)) {
4179                 int i;
4180
4181                 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4182                         c2s_map[i] = i;
4183                 *c2s_default = 0;
4184
4185                 return;
4186         }
4187
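             /* Each 32-bit shmem word packs four 8-bit priority-to-class entries:
              * the "lower" word covers priorities 0-3 and the "upper" word
              * priorities 4-7; they are unpacked a byte at a time below.
              */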
4188         tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4189         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4190         c2s_map[0] = tmp & 0xff;
4191         c2s_map[1] = (tmp >> 8) & 0xff;
4192         c2s_map[2] = (tmp >> 16) & 0xff;
4193         c2s_map[3] = (tmp >> 24) & 0xff;
4194
4195         tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4196         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4197         c2s_map[4] = tmp & 0xff;
4198         c2s_map[5] = (tmp >> 8) & 0xff;
4199         c2s_map[6] = (tmp >> 16) & 0xff;
4200         c2s_map[7] = (tmp >> 24) & 0xff;
4201
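             /* The default-class word holds one byte per function; pick ours by
              * the firmware mailbox index (mfw_vn).
              */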
4202         tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4203         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4204         *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4205 }
4206
4207 /**
4208  * bnx2x_setup_tc - routine to configure net_device for multi tc
4209  *
4210  * @dev: net device to configure
4211  * @num_tc: number of traffic classes to enable
4212  *
4213  * Callback connected to the ndo_setup_tc function pointer.
4214  */
4215 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4216 {
4217         struct bnx2x *bp = netdev_priv(dev);
4218         u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4219         int cos, prio, count, offset;
4220
4221         /* setup tc must be called under rtnl lock */
4222         ASSERT_RTNL();
4223
4224         /* no traffic classes requested - reset any existing mapping */
4225         if (!num_tc) {
4226                 netdev_reset_tc(dev);
4227                 return 0;
4228         }
4229
4230         /* requested to support too many traffic classes */
4231         if (num_tc > bp->max_cos) {
4232                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4233                           num_tc, bp->max_cos);
4234                 return -EINVAL;
4235         }
4236
4237         /* declare amount of supported traffic classes */
4238         if (netdev_set_num_tc(dev, num_tc)) {
4239                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4240                 return -EINVAL;
4241         }
4242
4243         bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4244
4245         /* configure priority to traffic class mapping */
4246         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4247                 int outer_prio = c2s_map[prio];
4248
4249                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4250                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4251                    "mapping priority %d to tc %d\n",
4252                    outer_prio, bp->prio_to_cos[outer_prio]);
4253         }
4254
4255         /* Use this configuration to differentiate tc0 from other COSes.
4256            This can be used for ETS or PFC, and saves the effort of setting
4257            up a multi-class queueing discipline or negotiating DCBX with a switch:
4258         netdev_set_prio_tc_map(dev, 0, 0);
4259         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4260         for (prio = 1; prio < 16; prio++) {
4261                 netdev_set_prio_tc_map(dev, prio, 1);
4262                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4263         } */
4264
4265         /* configure traffic class to transmission queue mapping */
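             /* Each CoS gets its own full set of Ethernet queues: CoS "cos" is
              * served by "count" queues starting at offset
              * cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
              */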
4266         for (cos = 0; cos < bp->max_cos; cos++) {
4267                 count = BNX2X_NUM_ETH_QUEUES(bp);
4268                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4269                 netdev_set_tc_queue(dev, cos, count, offset);
4270                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4271                    "mapping tc %d to offset %d count %d\n",
4272                    cos, offset, count);
4273         }
4274
4275         return 0;
4276 }
4277
4278 int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
4279                      struct tc_to_netdev *tc)
4280 {
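             /* Only mqprio offload is supported; pass the requested number of
              * traffic classes on to bnx2x_setup_tc().
              */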
4281         if (tc->type != TC_SETUP_MQPRIO)
4282                 return -EINVAL;
4283         return bnx2x_setup_tc(dev, tc->tc);
4284 }
4285
4286 /* called with rtnl_lock */
4287 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4288 {
4289         struct sockaddr *addr = p;
4290         struct bnx2x *bp = netdev_priv(dev);
4291         int rc = 0;
4292
4293         if (!is_valid_ether_addr(addr->sa_data)) {
4294                 BNX2X_ERR("Requested MAC address is not valid\n");
4295                 return -EINVAL;
4296         }
4297
4298         if (IS_MF_STORAGE_ONLY(bp)) {
4299                 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4300                 return -EINVAL;
4301         }
4302
4303         if (netif_running(dev))  {
4304                 rc = bnx2x_set_eth_mac(bp, false);
4305                 if (rc)
4306                         return rc;
4307         }
4308
4309         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4310
4311         if (netif_running(dev))
4312                 rc = bnx2x_set_eth_mac(bp, true);
4313
4314         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4315                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4316
4317         return rc;
4318 }
4319
4320 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4321 {
4322         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4323         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4324         u8 cos;
4325
4326         /* Common */
4327
4328         if (IS_FCOE_IDX(fp_index)) {
4329                 memset(sb, 0, sizeof(union host_hc_status_block));
4330                 fp->status_blk_mapping = 0;
4331         } else {
4332                 /* status blocks */
4333                 if (!CHIP_IS_E1x(bp))
4334                         BNX2X_PCI_FREE(sb->e2_sb,
4335                                        bnx2x_fp(bp, fp_index,
4336                                                 status_blk_mapping),
4337                                        sizeof(struct host_hc_status_block_e2));
4338                 else
4339                         BNX2X_PCI_FREE(sb->e1x_sb,
4340                                        bnx2x_fp(bp, fp_index,
4341                                                 status_blk_mapping),
4342                                        sizeof(struct host_hc_status_block_e1x));
4343         }
4344
4345         /* Rx */
4346         if (!skip_rx_queue(bp, fp_index)) {
4347                 bnx2x_free_rx_bds(fp);
4348
4349                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4350                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4351                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4352                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4353                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4354
4355                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4356                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4357                                sizeof(struct eth_fast_path_rx_cqe) *
4358                                NUM_RCQ_BD);
4359
4360                 /* SGE ring */
4361                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4362                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4363                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4364                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4365         }
4366
4367         /* Tx */
4368         if (!skip_tx_queue(bp, fp_index)) {
4369                 /* fastpath tx rings: tx_buf tx_desc */
4370                 for_each_cos_in_tx_queue(fp, cos) {
4371                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4372
4373                         DP(NETIF_MSG_IFDOWN,
4374                            "freeing tx memory of fp %d cos %d cid %d\n",
4375                            fp_index, cos, txdata->cid);
4376
4377                         BNX2X_FREE(txdata->tx_buf_ring);
4378                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4379                                 txdata->tx_desc_mapping,
4380                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4381                 }
4382         }
4383         /* end of fastpath */
4384 }
4385
4386 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4387 {
4388         int i;
4389         for_each_cnic_queue(bp, i)
4390                 bnx2x_free_fp_mem_at(bp, i);
4391 }
4392
4393 void bnx2x_free_fp_mem(struct bnx2x *bp)
4394 {
4395         int i;
4396         for_each_eth_queue(bp, i)
4397                 bnx2x_free_fp_mem_at(bp, i);
4398 }
4399
4400 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4401 {
4402         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
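             /* Cache pointers to the index values and the running index inside
              * the status block, so users of the fastpath don't have to care
              * whether this is an E1x or an E2 layout.
              */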
4403         if (!CHIP_IS_E1x(bp)) {
4404                 bnx2x_fp(bp, index, sb_index_values) =
4405                         (__le16 *)status_blk.e2_sb->sb.index_values;
4406                 bnx2x_fp(bp, index, sb_running_index) =
4407                         (__le16 *)status_blk.e2_sb->sb.running_index;
4408         } else {
4409                 bnx2x_fp(bp, index, sb_index_values) =
4410                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4411                 bnx2x_fp(bp, index, sb_running_index) =
4412                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4413         }
4414 }
4415
4416 /* Returns the number of actually allocated BDs */
4417 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4418                               int rx_ring_size)
4419 {
4420         struct bnx2x *bp = fp->bp;
4421         u16 ring_prod, cqe_ring_prod;
4422         int i, failure_cnt = 0;
4423
4424         fp->rx_comp_cons = 0;
4425         cqe_ring_prod = ring_prod = 0;
4426
4427         /* This routine is called only during init, so
4428          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4429          */
4430         for (i = 0; i < rx_ring_size; i++) {
4431                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4432                         failure_cnt++;
4433                         continue;
4434                 }
4435                 ring_prod = NEXT_RX_IDX(ring_prod);
4436                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4437                 WARN_ON(ring_prod <= (i - failure_cnt));
4438         }
4439
4440         if (failure_cnt)
4441                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4442                           i - failure_cnt, fp->index);
4443
4444         fp->rx_bd_prod = ring_prod;
4445         /* Limit the CQE producer by the CQE ring size */
4446         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4447                                cqe_ring_prod);
4448
4449         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4450
4451         return i - failure_cnt;
4452 }
4453
4454 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4455 {
4456         int i;
4457
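             /* The last CQE of every RCQ page is used as a "next page" pointer:
              * page i points to page i + 1, and the last page wraps back to the
              * first, forming a circular chain.
              */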
4458         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4459                 struct eth_rx_cqe_next_page *nextpg;
4460
4461                 nextpg = (struct eth_rx_cqe_next_page *)
4462                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4463                 nextpg->addr_hi =
4464                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4465                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4466                 nextpg->addr_lo =
4467                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4468                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4469         }
4470 }
4471
4472 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4473 {
4474         union host_hc_status_block *sb;
4475         struct bnx2x_fastpath *fp = &bp->fp[index];
4476         int ring_size = 0;
4477         u8 cos;
4478         int rx_ring_size = 0;
4479
4480         if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4481                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4482                 bp->rx_ring_size = rx_ring_size;
4483         } else if (!bp->rx_ring_size) {
4484                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4485
4486                 if (CHIP_IS_E3(bp)) {
4487                         u32 cfg = SHMEM_RD(bp,
4488                                            dev_info.port_hw_config[BP_PORT(bp)].
4489                                            default_cfg);
4490
4491                         /* Decrease ring size for 1G functions */
4492                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4493                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4494                                 rx_ring_size /= 10;
4495                 }
4496
4497                 /* allocate at least the number of buffers required by FW */
4498                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4499                                      MIN_RX_SIZE_TPA, rx_ring_size);
4500
4501                 bp->rx_ring_size = rx_ring_size;
4502         } else /* if rx_ring_size specified - use it */
4503                 rx_ring_size = bp->rx_ring_size;
4504
4505         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4506
4507         /* Common */
4508         sb = &bnx2x_fp(bp, index, status_blk);
4509
4510         if (!IS_FCOE_IDX(index)) {
4511                 /* status blocks */
4512                 if (!CHIP_IS_E1x(bp)) {
4513                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4514                                                     sizeof(struct host_hc_status_block_e2));
4515                         if (!sb->e2_sb)
4516                                 goto alloc_mem_err;
4517                 } else {
4518                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4519                                                      sizeof(struct host_hc_status_block_e1x));
4520                         if (!sb->e1x_sb)
4521                                 goto alloc_mem_err;
4522                 }
4523         }
4524
4525         /* The FCoE queue uses the default SB and doesn't ACK the SB, so there
4526          * is no need to set shortcuts for it.
4527          */
4528         if (!IS_FCOE_IDX(index))
4529                 set_sb_shortcuts(bp, index);
4530
4531         /* Tx */
4532         if (!skip_tx_queue(bp, index)) {
4533                 /* fastpath tx rings: tx_buf tx_desc */
4534                 for_each_cos_in_tx_queue(fp, cos) {
4535                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4536
4537                         DP(NETIF_MSG_IFUP,
4538                            "allocating tx memory of fp %d cos %d\n",
4539                            index, cos);
4540
4541                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4542                                                       sizeof(struct sw_tx_bd),
4543                                                       GFP_KERNEL);
4544                         if (!txdata->tx_buf_ring)
4545                                 goto alloc_mem_err;
4546                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4547                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4548                         if (!txdata->tx_desc_ring)
4549                                 goto alloc_mem_err;
4550                 }
4551         }
4552
4553         /* Rx */
4554         if (!skip_rx_queue(bp, index)) {
4555                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4556                 bnx2x_fp(bp, index, rx_buf_ring) =
4557                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4558                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4559                         goto alloc_mem_err;
4560                 bnx2x_fp(bp, index, rx_desc_ring) =
4561                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4562                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4563                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4564                         goto alloc_mem_err;
4565
4566                 /* Seed all CQEs with 1s */
4567                 bnx2x_fp(bp, index, rx_comp_ring) =
4568                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4569                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4570                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4571                         goto alloc_mem_err;
4572
4573                 /* SGE ring */
4574                 bnx2x_fp(bp, index, rx_page_ring) =
4575                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4576                                 GFP_KERNEL);
4577                 if (!bnx2x_fp(bp, index, rx_page_ring))
4578                         goto alloc_mem_err;
4579                 bnx2x_fp(bp, index, rx_sge_ring) =
4580                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4581                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4582                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4583                         goto alloc_mem_err;
4584                 /* RX BD ring */
4585                 bnx2x_set_next_page_rx_bd(fp);
4586
4587                 /* CQ ring */
4588                 bnx2x_set_next_page_rx_cq(fp);
4589
4590                 /* BDs */
4591                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4592                 if (ring_size < rx_ring_size)
4593                         goto alloc_mem_err;
4594         }
4595
4596         return 0;
4597
4598 /* handles low memory cases */
4599 alloc_mem_err:
4600         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4601                                                 index, ring_size);
4602         /* FW will drop all packets if the queue is not big enough;
4603          * in that case we disable the queue.
4604          * The minimum size is different for OOO, TPA and non-TPA queues.
4605          */
4606         if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4607                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4608                         /* release memory allocated for this queue */
4609                         bnx2x_free_fp_mem_at(bp, index);
4610                         return -ENOMEM;
4611         }
4612         return 0;
4613 }
4614
4615 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4616 {
4617         if (!NO_FCOE(bp))
4618                 /* FCoE */
4619                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4620                         /* we will fail the load process instead of
4621                          * marking NO_FCOE_FLAG
4622                          */
4623                         return -ENOMEM;
4624
4625         return 0;
4626 }
4627
4628 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4629 {
4630         int i;
4631
4632         /* 1. Allocate FP for leading - fatal if error
4633          * 2. Allocate RSS - fix number of queues if error
4634          */
4635
4636         /* leading */
4637         if (bnx2x_alloc_fp_mem_at(bp, 0))
4638                 return -ENOMEM;
4639
4640         /* RSS */
4641         for_each_nondefault_eth_queue(bp, i)
4642                 if (bnx2x_alloc_fp_mem_at(bp, i))
4643                         break;
4644
4645         /* handle memory failures */
4646         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4647                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4648
4649                 WARN_ON(delta < 0);
4650                 bnx2x_shrink_eth_fp(bp, delta);
4651                 if (CNIC_SUPPORT(bp))
4652                         /* move non-eth FPs next to the last eth FP;
4653                          * must be done in that order:
4654                          * FCOE_IDX < FWD_IDX < OOO_IDX
4655                          */
4656
4657                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4658                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4659                 bp->num_ethernet_queues -= delta;
4660                 bp->num_queues = bp->num_ethernet_queues +
4661                                  bp->num_cnic_queues;
4662                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4663                           bp->num_queues + delta, bp->num_queues);
4664         }
4665
4666         return 0;
4667 }
4668
4669 void bnx2x_free_mem_bp(struct bnx2x *bp)
4670 {
4671         int i;
4672
4673         for (i = 0; i < bp->fp_array_size; i++)
4674                 kfree(bp->fp[i].tpa_info);
4675         kfree(bp->fp);
4676         kfree(bp->sp_objs);
4677         kfree(bp->fp_stats);
4678         kfree(bp->bnx2x_txq);
4679         kfree(bp->msix_table);
4680         kfree(bp->ilt);
4681 }
4682
4683 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4684 {
4685         struct bnx2x_fastpath *fp;
4686         struct msix_entry *tbl;
4687         struct bnx2x_ilt *ilt;
4688         int msix_table_size = 0;
4689         int fp_array_size, txq_array_size;
4690         int i;
4691
4692         /*
4693          * The biggest MSI-X table we might need is the maximum number of fast
4694          * path IGU SBs plus the default SB (for PF only).
4695          */
4696         msix_table_size = bp->igu_sb_cnt;
4697         if (IS_PF(bp))
4698                 msix_table_size++;
4699         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4700
4701         /* fp array: RSS plus CNIC related L2 queues */
4702         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4703         bp->fp_array_size = fp_array_size;
4704         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4705
4706         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4707         if (!fp)
4708                 goto alloc_err;
4709         for (i = 0; i < bp->fp_array_size; i++) {
4710                 fp[i].tpa_info =
4711                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4712                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4713                 if (!(fp[i].tpa_info))
4714                         goto alloc_err;
4715         }
4716
4717         bp->fp = fp;
4718
4719         /* allocate sp objs */
4720         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4721                               GFP_KERNEL);
4722         if (!bp->sp_objs)
4723                 goto alloc_err;
4724
4725         /* allocate fp_stats */
4726         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4727                                GFP_KERNEL);
4728         if (!bp->fp_stats)
4729                 goto alloc_err;
4730
4731         /* Allocate memory for the transmission queues array */
4732         txq_array_size =
4733                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4734         BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4735
4736         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4737                                 GFP_KERNEL);
4738         if (!bp->bnx2x_txq)
4739                 goto alloc_err;
4740
4741         /* msix table */
4742         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4743         if (!tbl)
4744                 goto alloc_err;
4745         bp->msix_table = tbl;
4746
4747         /* ilt */
4748         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4749         if (!ilt)
4750                 goto alloc_err;
4751         bp->ilt = ilt;
4752
4753         return 0;
4754 alloc_err:
4755         bnx2x_free_mem_bp(bp);
4756         return -ENOMEM;
4757 }
4758
4759 int bnx2x_reload_if_running(struct net_device *dev)
4760 {
4761         struct bnx2x *bp = netdev_priv(dev);
4762
4763         if (unlikely(!netif_running(dev)))
4764                 return 0;
4765
4766         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4767         return bnx2x_nic_load(bp, LOAD_NORMAL);
4768 }
4769
4770 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4771 {
4772         u32 sel_phy_idx = 0;
4773         if (bp->link_params.num_phys <= 1)
4774                 return INT_PHY;
4775
4776         if (bp->link_vars.link_up) {
4777                 sel_phy_idx = EXT_PHY1;
4778                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4779                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4780                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4781                         sel_phy_idx = EXT_PHY2;
4782         } else {
4783
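                     /* No link: select the PHY according to the selection
                      * priority configured in the HW config.
                      */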
4784                 switch (bnx2x_phy_selection(&bp->link_params)) {
4785                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4786                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4787                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4788                        sel_phy_idx = EXT_PHY1;
4789                        break;
4790                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4791                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4792                        sel_phy_idx = EXT_PHY2;
4793                        break;
4794                 }
4795         }
4796
4797         return sel_phy_idx;
4798 }
4799 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4800 {
4801         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4802         /*
4803          * The selected activated PHY index is always taken after swapping (in
4804          * case PHY swapping is enabled), so when swapping is enabled we need
4805          * to reverse the configuration.
4806          */
4807
4808         if (bp->link_params.multi_phy_config &
4809             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4810                 if (sel_phy_idx == EXT_PHY1)
4811                         sel_phy_idx = EXT_PHY2;
4812                 else if (sel_phy_idx == EXT_PHY2)
4813                         sel_phy_idx = EXT_PHY1;
4814         }
4815         return LINK_CONFIG_IDX(sel_phy_idx);
4816 }
4817
4818 #ifdef NETDEV_FCOE_WWNN
4819 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4820 {
4821         struct bnx2x *bp = netdev_priv(dev);
4822         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4823
4824         switch (type) {
4825         case NETDEV_FCOE_WWNN:
4826                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4827                                 cp->fcoe_wwn_node_name_lo);
4828                 break;
4829         case NETDEV_FCOE_WWPN:
4830                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4831                                 cp->fcoe_wwn_port_name_lo);
4832                 break;
4833         default:
4834                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4835                 return -EINVAL;
4836         }
4837
4838         return 0;
4839 }
4840 #endif
4841
4842 /* called with rtnl_lock */
4843 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4844 {
4845         struct bnx2x *bp = netdev_priv(dev);
4846
4847         if (pci_num_vf(bp->pdev)) {
4848                 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4849                 return -EPERM;
4850         }
4851
4852         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4853                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4854                 return -EAGAIN;
4855         }
4856
4857         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4858             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4859                 BNX2X_ERR("Can't support requested MTU size\n");
4860                 return -EINVAL;
4861         }
4862
4863         /* This does not race with packet allocation
4864          * because the actual alloc size is
4865          * only updated as part of load
4866          */
4867         dev->mtu = new_mtu;
4868
4869         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4870                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4871
4872         return bnx2x_reload_if_running(dev);
4873 }
4874
4875 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4876                                      netdev_features_t features)
4877 {
4878         struct bnx2x *bp = netdev_priv(dev);
4879
4880         if (pci_num_vf(bp->pdev)) {
4881                 netdev_features_t changed = dev->features ^ features;
4882
4883                 /* Revert the requested changes in features if they would
4884                  * require an internal reload of the PF in bnx2x_set_features().
4885                  */
4886                 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4887                         features &= ~NETIF_F_RXCSUM;
4888                         features |= dev->features & NETIF_F_RXCSUM;
4889                 }
4890
4891                 if (changed & NETIF_F_LOOPBACK) {
4892                         features &= ~NETIF_F_LOOPBACK;
4893                         features |= dev->features & NETIF_F_LOOPBACK;
4894                 }
4895         }
4896
4897         /* TPA requires Rx CSUM offloading */
4898         if (!(features & NETIF_F_RXCSUM)) {
4899                 features &= ~NETIF_F_LRO;
4900                 features &= ~NETIF_F_GRO;
4901         }
4902
4903         return features;
4904 }
4905
4906 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4907 {
4908         struct bnx2x *bp = netdev_priv(dev);
4909         netdev_features_t changes = features ^ dev->features;
4910         bool bnx2x_reload = false;
4911         int rc;
4912
4913         /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4914         if (!pci_num_vf(bp->pdev)) {
4915                 if (features & NETIF_F_LOOPBACK) {
4916                         if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4917                                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4918                                 bnx2x_reload = true;
4919                         }
4920                 } else {
4921                         if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4922                                 bp->link_params.loopback_mode = LOOPBACK_NONE;
4923                                 bnx2x_reload = true;
4924                         }
4925                 }
4926         }
4927
4928         /* if GRO is changed while LRO is enabled, don't force a reload */
4929         if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4930                 changes &= ~NETIF_F_GRO;
4931
4932         /* if GRO is changed while HW TPA is off, don't force a reload */
4933         if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4934                 changes &= ~NETIF_F_GRO;
4935
4936         if (changes)
4937                 bnx2x_reload = true;
4938
4939         if (bnx2x_reload) {
4940                 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4941                         dev->features = features;
4942                         rc = bnx2x_reload_if_running(dev);
4943                         return rc ? rc : 1;
4944                 }
4945                 /* else: bnx2x_nic_load() will be called at end of recovery */
4946         }
4947
4948         return 0;
4949 }
4950
4951 void bnx2x_tx_timeout(struct net_device *dev)
4952 {
4953         struct bnx2x *bp = netdev_priv(dev);
4954
4955 #ifdef BNX2X_STOP_ON_ERROR
4956         if (!bp->panic)
4957                 bnx2x_panic();
4958 #endif
4959
4960         /* This allows the netif to be shut down gracefully before resetting */
4961         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4962 }
4963
4964 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4965 {
4966         struct net_device *dev = pci_get_drvdata(pdev);
4967         struct bnx2x *bp;
4968
4969         if (!dev) {
4970                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4971                 return -ENODEV;
4972         }
4973         bp = netdev_priv(dev);
4974
4975         rtnl_lock();
4976
4977         pci_save_state(pdev);
4978
4979         if (!netif_running(dev)) {
4980                 rtnl_unlock();
4981                 return 0;
4982         }
4983
4984         netif_device_detach(dev);
4985
4986         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4987
4988         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4989
4990         rtnl_unlock();
4991
4992         return 0;
4993 }
4994
4995 int bnx2x_resume(struct pci_dev *pdev)
4996 {
4997         struct net_device *dev = pci_get_drvdata(pdev);
4998         struct bnx2x *bp;
4999         int rc;
5000
5001         if (!dev) {
5002                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5003                 return -ENODEV;
5004         }
5005         bp = netdev_priv(dev);
5006
5007         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5008                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5009                 return -EAGAIN;
5010         }
5011
5012         rtnl_lock();
5013
5014         pci_restore_state(pdev);
5015
5016         if (!netif_running(dev)) {
5017                 rtnl_unlock();
5018                 return 0;
5019         }
5020
5021         bnx2x_set_power_state(bp, PCI_D0);
5022         netif_device_attach(dev);
5023
5024         rc = bnx2x_nic_load(bp, LOAD_OPEN);
5025
5026         rtnl_unlock();
5027
5028         return rc;
5029 }
5030
5031 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5032                               u32 cid)
5033 {
5034         if (!cxt) {
5035                 BNX2X_ERR("bad context pointer %p\n", cxt);
5036                 return;
5037         }
5038
5039         /* ustorm cxt validation */
5040         cxt->ustorm_ag_context.cdu_usage =
5041                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5042                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5043         /* xcontext validation */
5044         cxt->xstorm_ag_context.cdu_reserved =
5045                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5046                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5047 }
5048
5049 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5050                                     u8 fw_sb_id, u8 sb_index,
5051                                     u8 ticks)
5052 {
5053         u32 addr = BAR_CSTRORM_INTMEM +
5054                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5055         REG_WR8(bp, addr, ticks);
5056         DP(NETIF_MSG_IFUP,
5057            "port %x fw_sb_id %d sb_index %d ticks %d\n",
5058            port, fw_sb_id, sb_index, ticks);
5059 }
5060
5061 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5062                                     u16 fw_sb_id, u8 sb_index,
5063                                     u8 disable)
5064 {
5065         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5066         u32 addr = BAR_CSTRORM_INTMEM +
5067                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5068         u8 flags = REG_RD8(bp, addr);
5069         /* clear and set */
5070         flags &= ~HC_INDEX_DATA_HC_ENABLED;
5071         flags |= enable_flag;
5072         REG_WR8(bp, addr, flags);
5073         DP(NETIF_MSG_IFUP,
5074            "port %x fw_sb_id %d sb_index %d disable %d\n",
5075            port, fw_sb_id, sb_index, disable);
5076 }
5077
5078 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5079                                     u8 sb_index, u8 disable, u16 usec)
5080 {
5081         int port = BP_PORT(bp);
5082         u8 ticks = usec / BNX2X_BTR;
5083
5084         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5085
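             /* A zero coalescing interval also disables the HC for this index,
              * even if the caller didn't ask to disable it.
              */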
5086         disable = disable ? 1 : (usec ? 0 : 1);
5087         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5088 }
5089
5090 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5091                             u32 verbose)
5092 {
5093         smp_mb__before_atomic();
5094         set_bit(flag, &bp->sp_rtnl_state);
5095         smp_mb__after_atomic();
5096         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5097            flag);
5098         schedule_delayed_work(&bp->sp_rtnl_task, 0);
5099 }