GNU Linux-libre 4.9.337-gnu1
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1 /* QLogic qed NIC Driver
2  *
3  * Copyright (c) 2015 QLogic Corporation
4  *
5  * This software is available under the terms of the GNU General Public License
6  * (GPL) Version 2, available from the file COPYING in the main directory of
7  * this source tree.
8  */
9
10 #include <linux/types.h>
11 #include <asm/byteorder.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/if_vlan.h>
14 #include <linux/kernel.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/stddef.h>
18 #include <linux/version.h>
19 #include <linux/workqueue.h>
20 #include <net/ipv6.h>
21 #include <linux/bitops.h>
22 #include <linux/delay.h>
23 #include <linux/errno.h>
24 #include <linux/etherdevice.h>
25 #include <linux/io.h>
26 #include <linux/list.h>
27 #include <linux/mutex.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/qed/qed_ll2_if.h>
31 #include "qed.h"
32 #include "qed_cxt.h"
33 #include "qed_dev_api.h"
34 #include "qed_hsi.h"
35 #include "qed_hw.h"
36 #include "qed_int.h"
37 #include "qed_ll2.h"
38 #include "qed_mcp.h"
39 #include "qed_reg_addr.h"
40 #include "qed_sp.h"
41 #include "qed_roce.h"
42
43 #define QED_LL2_RX_REGISTERED(ll2)      ((ll2)->rx_queue.b_cb_registred)
44 #define QED_LL2_TX_REGISTERED(ll2)      ((ll2)->tx_queue.b_cb_registred)
45
46 #define QED_LL2_TX_SIZE (256)
47 #define QED_LL2_RX_SIZE (4096)
48
49 struct qed_cb_ll2_info {
50         int rx_cnt;
51         u32 rx_size;
52         u8 handle;
53         bool frags_mapped;
54
55         /* Lock protecting LL2 buffer lists in sleepless context */
56         spinlock_t lock;
57         struct list_head list;
58
59         const struct qed_ll2_cb_ops *cbs;
60         void *cb_cookie;
61 };
62
63 struct qed_ll2_buffer {
64         struct list_head list;
65         void *data;
66         dma_addr_t phys_addr;
67 };
68
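/* Default Tx-completion callback for LL2 buffers owned by the driver core:
 * unmaps the first fragment of the completed packet, hands the skb to the
 * registered upper-layer tx_cb() (if any), and frees the skb. When the
 * fragments were mapped by the caller, nr_frags is cleared first so freeing
 * the skb does not release pages it does not own.
 */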
69 static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
70                                         u8 connection_handle,
71                                         void *cookie,
72                                         dma_addr_t first_frag_addr,
73                                         bool b_last_fragment,
74                                         bool b_last_packet)
75 {
76         struct qed_dev *cdev = p_hwfn->cdev;
77         struct sk_buff *skb = cookie;
78
79         /* All we need to do is release the mapping */
80         dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
81                          skb_headlen(skb), DMA_TO_DEVICE);
82
83         if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
84                 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
85                                       b_last_fragment);
86
87         if (cdev->ll2->frags_mapped)
88                 /* Case where mapped frags were received, need to
89                  * free skb with nr_frags marked as 0
90                  */
91                 skb_shinfo(skb)->nr_frags = 0;
92
93         dev_kfree_skb_any(skb);
94 }
95
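/* Allocate an Rx buffer of cdev->ll2->rx_size bytes and DMA-map it for the
 * device, skipping NET_SKB_PAD so a later build_skb() has headroom.
 * Returns 0 on success or -ENOMEM on allocation/mapping failure.
 */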
96 static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
97                                 u8 **data, dma_addr_t *phys_addr)
98 {
99         *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
100         if (!(*data)) {
101                 DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
102                 return -ENOMEM;
103         }
104
105         *phys_addr = dma_map_single(&cdev->pdev->dev,
106                                     ((*data) + NET_SKB_PAD),
107                                     cdev->ll2->rx_size, DMA_FROM_DEVICE);
108         if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
109                 DP_INFO(cdev, "Failed to map LL2 buffer data\n");
110                 kfree((*data));
111                 return -ENOMEM;
112         }
113
114         return 0;
115 }
116
117 static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
118                                  struct qed_ll2_buffer *buffer)
119 {
120         spin_lock_bh(&cdev->ll2->lock);
121
122         dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
123                          cdev->ll2->rx_size, DMA_FROM_DEVICE);
124         kfree(buffer->data);
125         list_del(&buffer->list);
126
127         cdev->ll2->rx_cnt--;
128         if (!cdev->ll2->rx_cnt)
129                 DP_INFO(cdev, "All LL2 entries were removed\n");
130
131         spin_unlock_bh(&cdev->ll2->lock);
132
133         return 0;
134 }
135
136 static void qed_ll2_kill_buffers(struct qed_dev *cdev)
137 {
138         struct qed_ll2_buffer *buffer, *tmp_buffer;
139
140         list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
141                 qed_ll2_dealloc_buffer(cdev, buffer);
142 }
143
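/* Rx-completion handler for buffers owned by the LL2 layer itself: tries to
 * allocate a replacement buffer, builds an skb around the completed one,
 * passes it to the registered rx_cb() and finally reposts a buffer to
 * firmware. On any failure the original buffer is simply reposted, or
 * released if even the repost fails.
 */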
144 static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
145                                         u8 connection_handle,
146                                         struct qed_ll2_rx_packet *p_pkt,
147                                         struct core_rx_fast_path_cqe *p_cqe,
148                                         bool b_last_packet)
149 {
150         u16 packet_length = le16_to_cpu(p_cqe->packet_length);
151         struct qed_ll2_buffer *buffer = p_pkt->cookie;
152         struct qed_dev *cdev = p_hwfn->cdev;
153         u16 vlan = le16_to_cpu(p_cqe->vlan);
154         u32 opaque_data_0, opaque_data_1;
155         u8 pad = p_cqe->placement_offset;
156         dma_addr_t new_phys_addr;
157         struct sk_buff *skb;
158         bool reuse = false;
159         int rc = -EINVAL;
160         u8 *new_data;
161
162         opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
163         opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
164
165         DP_VERBOSE(p_hwfn,
166                    (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
167                    "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
168                    (u64)p_pkt->rx_buf_addr, pad, packet_length,
169                    le16_to_cpu(p_cqe->parse_flags.flags), vlan,
170                    opaque_data_0, opaque_data_1);
171
172         if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
173                 print_hex_dump(KERN_INFO, "",
174                                DUMP_PREFIX_OFFSET, 16, 1,
175                                buffer->data, packet_length, false);
176         }
177
178         /* Determine if data is valid */
179         if (packet_length < ETH_HLEN)
180                 reuse = true;
181
182         /* Allocate a replacement buffer; reuse the old one upon failure */
183         if (!reuse)
184                 rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
185                                           &new_phys_addr);
186
187         /* If we need to reuse or there's no replacement buffer, repost this one */
188         if (rc)
189                 goto out_post;
190         dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
191                          cdev->ll2->rx_size, DMA_FROM_DEVICE);
192
193         skb = build_skb(buffer->data, 0);
194         if (!skb) {
195                 rc = -ENOMEM;
196                 goto out_post;
197         }
198
199         pad += NET_SKB_PAD;
200         skb_reserve(skb, pad);
201         skb_put(skb, packet_length);
202         skb_checksum_none_assert(skb);
203
204         /* Get partial ethernet information instead of eth_type_trans(),
205          * since we don't have an associated net_device.
206          */
207         skb_reset_mac_header(skb);
208         skb->protocol = eth_hdr(skb)->h_proto;
209
210         /* Pass SKB onward */
211         if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
212                 if (vlan)
213                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
214                 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
215                                       opaque_data_0, opaque_data_1);
216         }
217
218         /* Update Buffer information and update FW producer */
219         buffer->data = new_data;
220         buffer->phys_addr = new_phys_addr;
221
222 out_post:
223         rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
224                                     buffer->phys_addr, 0,  buffer, 1);
225
226         if (rc)
227                 qed_ll2_dealloc_buffer(cdev, buffer);
228 }
229
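/* Translate a connection handle into its qed_ll2_info entry, optionally
 * taking the connection mutex and optionally requiring the connection to be
 * active. Returns NULL for out-of-range handles, or for inactive connections
 * when only active ones are requested.
 */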
230 static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
231                                                     u8 connection_handle,
232                                                     bool b_lock,
233                                                     bool b_only_active)
234 {
235         struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
236
237         if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
238                 return NULL;
239
240         if (!p_hwfn->p_ll2_info)
241                 return NULL;
242
243         p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
244
245         if (b_only_active) {
246                 if (b_lock)
247                         mutex_lock(&p_ll2_conn->mutex);
248                 if (p_ll2_conn->b_active)
249                         p_ret = p_ll2_conn;
250                 if (b_lock)
251                         mutex_unlock(&p_ll2_conn->mutex);
252         } else {
253                 p_ret = p_ll2_conn;
254         }
255
256         return p_ret;
257 }
258
259 static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
260                                                   u8 connection_handle)
261 {
262         return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
263 }
264
265 static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
266                                                        u8 connection_handle)
267 {
268         return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
269 }
270
271 static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
272                                                            *p_hwfn,
273                                                            u8 connection_handle)
274 {
275         return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
276 }
277
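/* Drain every packet still sitting on the Tx active list of a connection
 * that is being torn down, completing each one toward its owner through the
 * GSI or regular Tx-completion callback.
 */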
278 static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
279 {
280         bool b_last_packet = false, b_last_frag = false;
281         struct qed_ll2_tx_packet *p_pkt = NULL;
282         struct qed_ll2_info *p_ll2_conn;
283         struct qed_ll2_tx_queue *p_tx;
284         dma_addr_t tx_frag;
285
286         p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
287         if (!p_ll2_conn)
288                 return;
289
290         p_tx = &p_ll2_conn->tx_queue;
291
292         while (!list_empty(&p_tx->active_descq)) {
293                 p_pkt = list_first_entry(&p_tx->active_descq,
294                                          struct qed_ll2_tx_packet, list_entry);
295                 if (!p_pkt)
296                         break;
297
298                 list_del(&p_pkt->list_entry);
299                 b_last_packet = list_empty(&p_tx->active_descq);
300                 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
301                 p_tx->cur_completing_packet = *p_pkt;
302                 p_tx->cur_completing_bd_idx = 1;
303                 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
304                 tx_frag = p_pkt->bds_set[0].tx_frag;
305                 if (p_ll2_conn->gsi_enable)
306                         qed_ll2b_release_tx_gsi_packet(p_hwfn,
307                                                        p_ll2_conn->my_id,
308                                                        p_pkt->cookie,
309                                                        tx_frag,
310                                                        b_last_frag,
311                                                        b_last_packet);
312                 else
313                         qed_ll2b_complete_tx_packet(p_hwfn,
314                                                     p_ll2_conn->my_id,
315                                                     p_pkt->cookie,
316                                                     tx_frag,
317                                                     b_last_frag,
318                                                     b_last_packet);
319
320         }
321 }
322
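/* Tx status-block callback: walks the firmware consumer index forward,
 * consuming the BDs of each completed packet from the Tx chain and invoking
 * the matching completion routine with the queue lock dropped.
 */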
323 static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
324 {
325         struct qed_ll2_info *p_ll2_conn = p_cookie;
326         struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
327         u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
328         struct qed_ll2_tx_packet *p_pkt;
329         bool b_last_frag = false;
330         unsigned long flags;
331         dma_addr_t tx_frag;
332         int rc = -EINVAL;
333
334         spin_lock_irqsave(&p_tx->lock, flags);
335         if (p_tx->b_completing_packet) {
336                 rc = -EBUSY;
337                 goto out;
338         }
339
340         new_idx = le16_to_cpu(*p_tx->p_fw_cons);
341         num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
342         while (num_bds) {
343                 if (list_empty(&p_tx->active_descq))
344                         goto out;
345
346                 p_pkt = list_first_entry(&p_tx->active_descq,
347                                          struct qed_ll2_tx_packet, list_entry);
348                 if (!p_pkt)
349                         goto out;
350
351                 p_tx->b_completing_packet = true;
352                 p_tx->cur_completing_packet = *p_pkt;
353                 num_bds_in_packet = p_pkt->bd_used;
354                 list_del(&p_pkt->list_entry);
355
356                 if (num_bds < num_bds_in_packet) {
357                         DP_NOTICE(p_hwfn,
358                                   "Rest of BDs does not cover whole packet\n");
359                         goto out;
360                 }
361
362                 num_bds -= num_bds_in_packet;
363                 p_tx->bds_idx += num_bds_in_packet;
364                 while (num_bds_in_packet--)
365                         qed_chain_consume(&p_tx->txq_chain);
366
367                 p_tx->cur_completing_bd_idx = 1;
368                 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
369                 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
370
371                 spin_unlock_irqrestore(&p_tx->lock, flags);
372                 tx_frag = p_pkt->bds_set[0].tx_frag;
373                 if (p_ll2_conn->gsi_enable)
374                         qed_ll2b_complete_tx_gsi_packet(p_hwfn,
375                                                         p_ll2_conn->my_id,
376                                                         p_pkt->cookie,
377                                                         tx_frag,
378                                                         b_last_frag, !num_bds);
379                 else
380                         qed_ll2b_complete_tx_packet(p_hwfn,
381                                                     p_ll2_conn->my_id,
382                                                     p_pkt->cookie,
383                                                     tx_frag,
384                                                     b_last_frag, !num_bds);
385                 spin_lock_irqsave(&p_tx->lock, flags);
386         }
387
388         p_tx->b_completing_packet = false;
389         rc = 0;
390 out:
391         spin_unlock_irqrestore(&p_tx->lock, flags);
392         return rc;
393 }
394
395 static int
396 qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
397                            struct qed_ll2_info *p_ll2_info,
398                            union core_rx_cqe_union *p_cqe,
399                            unsigned long lock_flags, bool b_last_cqe)
400 {
401         struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
402         struct qed_ll2_rx_packet *p_pkt = NULL;
403         u16 packet_length, parse_flags, vlan;
404         u32 src_mac_addrhi;
405         u16 src_mac_addrlo;
406
407         if (!list_empty(&p_rx->active_descq))
408                 p_pkt = list_first_entry(&p_rx->active_descq,
409                                          struct qed_ll2_rx_packet, list_entry);
410         if (!p_pkt) {
411                 DP_NOTICE(p_hwfn,
412                           "GSI Rx completion but active_descq is empty\n");
413                 return -EIO;
414         }
415
416         list_del(&p_pkt->list_entry);
417         parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
418         packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
419         vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
420         src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
421         src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
422         if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
423                 DP_NOTICE(p_hwfn,
424                           "Mismatch between active_descq and the LL2 Rx chain\n");
425         list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
426
427         spin_unlock_irqrestore(&p_rx->lock, lock_flags);
428         qed_ll2b_complete_rx_gsi_packet(p_hwfn,
429                                         p_ll2_info->my_id,
430                                         p_pkt->cookie,
431                                         p_pkt->rx_buf_addr,
432                                         packet_length,
433                                         p_cqe->rx_cqe_gsi.data_length_error,
434                                         parse_flags,
435                                         vlan,
436                                         src_mac_addrhi,
437                                         src_mac_addrlo, b_last_cqe);
438         spin_lock_irqsave(&p_rx->lock, lock_flags);
439
440         return 0;
441 }
442
443 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
444                                       struct qed_ll2_info *p_ll2_conn,
445                                       union core_rx_cqe_union *p_cqe,
446                                       unsigned long *p_lock_flags,
447                                       bool b_last_cqe)
448 {
449         struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
450         struct qed_ll2_rx_packet *p_pkt = NULL;
451
452         if (!list_empty(&p_rx->active_descq))
453                 p_pkt = list_first_entry(&p_rx->active_descq,
454                                          struct qed_ll2_rx_packet, list_entry);
455         if (!p_pkt) {
456                 DP_NOTICE(p_hwfn,
457                           "LL2 Rx completion but active_descq is empty\n");
458                 return -EIO;
459         }
460         list_del(&p_pkt->list_entry);
461
462         if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
463                 DP_NOTICE(p_hwfn,
464                           "Mismatch between active_descq and the LL2 Rx chain\n");
465         list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
466
467         spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
468         qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
469                                     p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
470         spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
471
472         return 0;
473 }
474
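/* Rx status-block callback: consumes CQEs from the Rx completion queue until
 * the software consumer catches up with the firmware producer, dispatching
 * each CQE to the GSI or regular handler according to its type.
 */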
475 static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
476 {
477         struct qed_ll2_info *p_ll2_conn = cookie;
478         struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
479         union core_rx_cqe_union *cqe = NULL;
480         u16 cq_new_idx = 0, cq_old_idx = 0;
481         unsigned long flags = 0;
482         int rc = 0;
483
484         spin_lock_irqsave(&p_rx->lock, flags);
485         cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
486         cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
487
488         while (cq_new_idx != cq_old_idx) {
489                 bool b_last_cqe = (cq_new_idx == cq_old_idx);
490
491                 cqe = qed_chain_consume(&p_rx->rcq_chain);
492                 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
493
494                 DP_VERBOSE(p_hwfn,
495                            QED_MSG_LL2,
496                            "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
497                            cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
498
499                 switch (cqe->rx_cqe_sp.type) {
500                 case CORE_RX_CQE_TYPE_SLOW_PATH:
501                         DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
502                         rc = -EINVAL;
503                         break;
504                 case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
505                         rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
506                                                         cqe, flags, b_last_cqe);
507                         break;
508                 case CORE_RX_CQE_TYPE_REGULAR:
509                         rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
510                                                         cqe, &flags,
511                                                         b_last_cqe);
512                         break;
513                 default:
514                         rc = -EIO;
515                 }
516         }
517
518         spin_unlock_irqrestore(&p_rx->lock, flags);
519         return rc;
520 }
521
522 static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
523 {
524         struct qed_ll2_info *p_ll2_conn = NULL;
525         struct qed_ll2_rx_packet *p_pkt = NULL;
526         struct qed_ll2_rx_queue *p_rx;
527
528         p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
529         if (!p_ll2_conn)
530                 return;
531
532         p_rx = &p_ll2_conn->rx_queue;
533
534         while (!list_empty(&p_rx->active_descq)) {
535                 dma_addr_t rx_buf_addr;
536                 void *cookie;
537                 bool b_last;
538
539                 p_pkt = list_first_entry(&p_rx->active_descq,
540                                          struct qed_ll2_rx_packet, list_entry);
541                 if (!p_pkt)
542                         break;
543
544                 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
545
546                 rx_buf_addr = p_pkt->rx_buf_addr;
547                 cookie = p_pkt->cookie;
548
549                 b_last = list_empty(&p_rx->active_descq);
550         }
551 }
552
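/* Post a CORE_RAMROD_RX_QUEUE_START ramrod describing the Rx BD chain and
 * the CQE PBL to firmware, together with the per-connection error policy and
 * the broadcast/multicast acceptance required in the relevant MF modes.
 */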
553 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
554                                      struct qed_ll2_info *p_ll2_conn,
555                                      u8 action_on_error)
556 {
557         enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
558         struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
559         struct core_rx_start_ramrod_data *p_ramrod = NULL;
560         struct qed_spq_entry *p_ent = NULL;
561         struct qed_sp_init_data init_data;
562         u16 cqe_pbl_size;
563         int rc = 0;
564
565         /* Get SPQ entry */
566         memset(&init_data, 0, sizeof(init_data));
567         init_data.cid = p_ll2_conn->cid;
568         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
569         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
570
571         rc = qed_sp_init_request(p_hwfn, &p_ent,
572                                  CORE_RAMROD_RX_QUEUE_START,
573                                  PROTOCOLID_CORE, &init_data);
574         if (rc)
575                 return rc;
576
577         p_ramrod = &p_ent->ramrod.core_rx_queue_start;
578
579         p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
580         p_ramrod->sb_index = p_rx->rx_sb_index;
581         p_ramrod->complete_event_flg = 1;
582
583         p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
584         DMA_REGPAIR_LE(p_ramrod->bd_base,
585                        p_rx->rxq_chain.p_phys_addr);
586         cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
587         p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
588         DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
589                        qed_chain_get_pbl_phys(&p_rx->rcq_chain));
590
591         p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
592         p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
593         p_ramrod->queue_id = p_ll2_conn->queue_id;
594         p_ramrod->main_func_queue = 1;
595
596         if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
597             p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
598                 p_ramrod->mf_si_bcast_accept_all = 1;
599                 p_ramrod->mf_si_mcast_accept_all = 1;
600         } else {
601                 p_ramrod->mf_si_bcast_accept_all = 0;
602                 p_ramrod->mf_si_mcast_accept_all = 0;
603         }
604
605         p_ramrod->action_on_error.error_type = action_on_error;
606         p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
607         return qed_spq_post(p_hwfn, p_ent, NULL);
608 }
609
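/* Post a CORE_RAMROD_TX_QUEUE_START ramrod for the connection's Tx PBL,
 * selecting the QM physical queue by traffic class and marking the ramrod
 * with the protocol that owns this LL2 connection.
 */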
610 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
611                                      struct qed_ll2_info *p_ll2_conn)
612 {
613         enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
614         struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
615         struct core_tx_start_ramrod_data *p_ramrod = NULL;
616         struct qed_spq_entry *p_ent = NULL;
617         struct qed_sp_init_data init_data;
618         union qed_qm_pq_params pq_params;
619         u16 pq_id = 0, pbl_size;
620         int rc = -EINVAL;
621
622         if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
623                 return 0;
624
625         /* Get SPQ entry */
626         memset(&init_data, 0, sizeof(init_data));
627         init_data.cid = p_ll2_conn->cid;
628         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
629         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
630
631         rc = qed_sp_init_request(p_hwfn, &p_ent,
632                                  CORE_RAMROD_TX_QUEUE_START,
633                                  PROTOCOLID_CORE, &init_data);
634         if (rc)
635                 return rc;
636
637         p_ramrod = &p_ent->ramrod.core_tx_queue_start;
638
639         p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
640         p_ramrod->sb_index = p_tx->tx_sb_index;
641         p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
642         p_ll2_conn->tx_stats_en = 1;
643         p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
644         p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
645
646         DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
647                        qed_chain_get_pbl_phys(&p_tx->txq_chain));
648         pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
649         p_ramrod->pbl_size = cpu_to_le16(pbl_size);
650
651         memset(&pq_params, 0, sizeof(pq_params));
652         pq_params.core.tc = p_ll2_conn->tx_tc;
653         pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
654         p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
655
656         switch (conn_type) {
657         case QED_LL2_TYPE_ISCSI:
658         case QED_LL2_TYPE_ISCSI_OOO:
659                 p_ramrod->conn_type = PROTOCOLID_ISCSI;
660                 break;
661         case QED_LL2_TYPE_ROCE:
662                 p_ramrod->conn_type = PROTOCOLID_ROCE;
663                 break;
664         default:
665                 p_ramrod->conn_type = PROTOCOLID_ETH;
666                 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
667         }
668
669         p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
670         return qed_spq_post(p_hwfn, p_ent, NULL);
671 }
672
673 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
674                                     struct qed_ll2_info *p_ll2_conn)
675 {
676         struct core_rx_stop_ramrod_data *p_ramrod = NULL;
677         struct qed_spq_entry *p_ent = NULL;
678         struct qed_sp_init_data init_data;
679         int rc = -EINVAL;
680
681         /* Get SPQ entry */
682         memset(&init_data, 0, sizeof(init_data));
683         init_data.cid = p_ll2_conn->cid;
684         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
685         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
686
687         rc = qed_sp_init_request(p_hwfn, &p_ent,
688                                  CORE_RAMROD_RX_QUEUE_STOP,
689                                  PROTOCOLID_CORE, &init_data);
690         if (rc)
691                 return rc;
692
693         p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
694
695         p_ramrod->complete_event_flg = 1;
696         p_ramrod->queue_id = p_ll2_conn->queue_id;
697
698         return qed_spq_post(p_hwfn, p_ent, NULL);
699 }
700
701 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
702                                     struct qed_ll2_info *p_ll2_conn)
703 {
704         struct qed_spq_entry *p_ent = NULL;
705         struct qed_sp_init_data init_data;
706         int rc = -EINVAL;
707
708         /* Get SPQ entry */
709         memset(&init_data, 0, sizeof(init_data));
710         init_data.cid = p_ll2_conn->cid;
711         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
712         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
713
714         rc = qed_sp_init_request(p_hwfn, &p_ent,
715                                  CORE_RAMROD_TX_QUEUE_STOP,
716                                  PROTOCOLID_CORE, &init_data);
717         if (rc)
718                 return rc;
719
720         return qed_spq_post(p_hwfn, p_ent, NULL);
721 }
722
723 static int
724 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
725                               struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
726 {
727         struct qed_ll2_rx_packet *p_descq;
728         u32 capacity;
729         int rc = 0;
730
731         if (!rx_num_desc)
732                 goto out;
733
734         rc = qed_chain_alloc(p_hwfn->cdev,
735                              QED_CHAIN_USE_TO_CONSUME_PRODUCE,
736                              QED_CHAIN_MODE_NEXT_PTR,
737                              QED_CHAIN_CNT_TYPE_U16,
738                              rx_num_desc,
739                              sizeof(struct core_rx_bd),
740                              &p_ll2_info->rx_queue.rxq_chain);
741         if (rc) {
742                 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
743                 goto out;
744         }
745
746         capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
747         p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
748                           GFP_KERNEL);
749         if (!p_descq) {
750                 rc = -ENOMEM;
751                 DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
752                 goto out;
753         }
754         p_ll2_info->rx_queue.descq_array = p_descq;
755
756         rc = qed_chain_alloc(p_hwfn->cdev,
757                              QED_CHAIN_USE_TO_CONSUME_PRODUCE,
758                              QED_CHAIN_MODE_PBL,
759                              QED_CHAIN_CNT_TYPE_U16,
760                              rx_num_desc,
761                              sizeof(struct core_rx_fast_path_cqe),
762                              &p_ll2_info->rx_queue.rcq_chain);
763         if (rc) {
764                 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
765                 goto out;
766         }
767
768         DP_VERBOSE(p_hwfn, QED_MSG_LL2,
769                    "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
770                    p_ll2_info->conn_type, rx_num_desc);
771
772 out:
773         return rc;
774 }
775
776 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
777                                          struct qed_ll2_info *p_ll2_info,
778                                          u16 tx_num_desc)
779 {
780         struct qed_ll2_tx_packet *p_descq;
781         u32 capacity;
782         int rc = 0;
783
784         if (!tx_num_desc)
785                 goto out;
786
787         rc = qed_chain_alloc(p_hwfn->cdev,
788                              QED_CHAIN_USE_TO_CONSUME_PRODUCE,
789                              QED_CHAIN_MODE_PBL,
790                              QED_CHAIN_CNT_TYPE_U16,
791                              tx_num_desc,
792                              sizeof(struct core_tx_bd),
793                              &p_ll2_info->tx_queue.txq_chain);
794         if (rc)
795                 goto out;
796
797         capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
798         p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
799                           GFP_KERNEL);
800         if (!p_descq) {
801                 rc = -ENOMEM;
802                 goto out;
803         }
804         p_ll2_info->tx_queue.descq_array = p_descq;
805
806         DP_VERBOSE(p_hwfn, QED_MSG_LL2,
807                    "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
808                    p_ll2_info->conn_type, tx_num_desc);
809
810 out:
811         if (rc)
812                 DP_NOTICE(p_hwfn,
813                           "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
814                           tx_num_desc);
815         return rc;
816 }
817
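/* Reserve a free LL2 connection slot, copy the caller's parameters into it,
 * allocate the Rx/Tx chains and register the status-block callbacks.
 * A minimal usage sketch of the connection lifecycle (illustrative only -
 * the descriptor counts and connection parameters below are assumptions,
 * and error handling is omitted):
 *
 *	struct qed_ll2_info params = { .conn_type = QED_LL2_TYPE_ROCE,
 *				       .mtu = 1500 };
 *	u8 handle;
 *
 *	qed_ll2_acquire_connection(p_hwfn, &params, 64, 64, &handle);
 *	qed_ll2_establish_connection(p_hwfn, handle);
 *	qed_ll2_post_rx_buffer(p_hwfn, handle, phys_addr, 0, cookie, 1);
 *	...
 *	qed_ll2_terminate_connection(p_hwfn, handle);
 *	qed_ll2_release_connection(p_hwfn, handle);
 */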
818 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
819                                struct qed_ll2_info *p_params,
820                                u16 rx_num_desc,
821                                u16 tx_num_desc,
822                                u8 *p_connection_handle)
823 {
824         qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
825         struct qed_ll2_info *p_ll2_info = NULL;
826         int rc;
827         u8 i;
828
829         if (!p_connection_handle || !p_hwfn->p_ll2_info)
830                 return -EINVAL;
831
832         /* Find a free connection to be used */
833         for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
834                 mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
835                 if (p_hwfn->p_ll2_info[i].b_active) {
836                         mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
837                         continue;
838                 }
839
840                 p_hwfn->p_ll2_info[i].b_active = true;
841                 p_ll2_info = &p_hwfn->p_ll2_info[i];
842                 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
843                 break;
844         }
845         if (!p_ll2_info)
846                 return -EBUSY;
847
848         p_ll2_info->conn_type = p_params->conn_type;
849         p_ll2_info->mtu = p_params->mtu;
850         p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
851         p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
852         p_ll2_info->tx_tc = p_params->tx_tc;
853         p_ll2_info->tx_dest = p_params->tx_dest;
854         p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
855         p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
856         p_ll2_info->gsi_enable = p_params->gsi_enable;
857
858         rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
859         if (rc)
860                 goto q_allocate_fail;
861
862         rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
863         if (rc)
864                 goto q_allocate_fail;
865
866         /* Register callbacks for the Rx/Tx queues */
867         comp_rx_cb = qed_ll2_rxq_completion;
868         comp_tx_cb = qed_ll2_txq_completion;
869
870         if (rx_num_desc) {
871                 qed_int_register_cb(p_hwfn, comp_rx_cb,
872                                     &p_hwfn->p_ll2_info[i],
873                                     &p_ll2_info->rx_queue.rx_sb_index,
874                                     &p_ll2_info->rx_queue.p_fw_cons);
875                 p_ll2_info->rx_queue.b_cb_registred = true;
876         }
877
878         if (tx_num_desc) {
879                 qed_int_register_cb(p_hwfn,
880                                     comp_tx_cb,
881                                     &p_hwfn->p_ll2_info[i],
882                                     &p_ll2_info->tx_queue.tx_sb_index,
883                                     &p_ll2_info->tx_queue.p_fw_cons);
884                 p_ll2_info->tx_queue.b_cb_registred = true;
885         }
886
887         *p_connection_handle = i;
888         return rc;
889
890 q_allocate_fail:
891         qed_ll2_release_connection(p_hwfn, i);
892         return -ENOMEM;
893 }
894
895 static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
896                                            struct qed_ll2_info *p_ll2_conn)
897 {
898         u8 action_on_error = 0;
899
900         if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
901                 return 0;
902
903         DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
904
905         SET_FIELD(action_on_error,
906                   CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
907                   p_ll2_conn->ai_err_packet_too_big);
908         SET_FIELD(action_on_error,
909                   CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
910
911         return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
912 }
913
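/* Bring an acquired connection to life: reset and re-seed the Rx/Tx
 * descriptor lists, acquire a CORE CID, derive the queue/stats id and the
 * producer/doorbell addresses, and post the Rx/Tx queue-start ramrods.
 */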
914 int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
915 {
916         struct qed_ll2_info *p_ll2_conn;
917         struct qed_ll2_rx_queue *p_rx;
918         struct qed_ll2_tx_queue *p_tx;
919         int rc = -EINVAL;
920         u32 i, capacity;
921         u8 qid;
922
923         p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
924         if (!p_ll2_conn)
925                 return -EINVAL;
926         p_rx = &p_ll2_conn->rx_queue;
927         p_tx = &p_ll2_conn->tx_queue;
928
929         qed_chain_reset(&p_rx->rxq_chain);
930         qed_chain_reset(&p_rx->rcq_chain);
931         INIT_LIST_HEAD(&p_rx->active_descq);
932         INIT_LIST_HEAD(&p_rx->free_descq);
933         INIT_LIST_HEAD(&p_rx->posting_descq);
934         spin_lock_init(&p_rx->lock);
935         capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
936         for (i = 0; i < capacity; i++)
937                 list_add_tail(&p_rx->descq_array[i].list_entry,
938                               &p_rx->free_descq);
939         *p_rx->p_fw_cons = 0;
940
941         qed_chain_reset(&p_tx->txq_chain);
942         INIT_LIST_HEAD(&p_tx->active_descq);
943         INIT_LIST_HEAD(&p_tx->free_descq);
944         INIT_LIST_HEAD(&p_tx->sending_descq);
945         spin_lock_init(&p_tx->lock);
946         capacity = qed_chain_get_capacity(&p_tx->txq_chain);
947         for (i = 0; i < capacity; i++)
948                 list_add_tail(&p_tx->descq_array[i].list_entry,
949                               &p_tx->free_descq);
950         p_tx->cur_completing_bd_idx = 0;
951         p_tx->bds_idx = 0;
952         p_tx->b_completing_packet = false;
953         p_tx->cur_send_packet = NULL;
954         p_tx->cur_send_frag_num = 0;
955         p_tx->cur_completing_frag_num = 0;
956         *p_tx->p_fw_cons = 0;
957
958         qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
959
960         qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
961         p_ll2_conn->queue_id = qid;
962         p_ll2_conn->tx_stats_id = qid;
963         p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
964                                             GTT_BAR0_MAP_REG_TSDM_RAM +
965                                             TSTORM_LL2_RX_PRODS_OFFSET(qid);
966         p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
967                                             qed_db_addr(p_ll2_conn->cid,
968                                                         DQ_DEMS_LEGACY);
969
970         rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
971         if (rc)
972                 return rc;
973
974         rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
975         if (rc)
976                 return rc;
977
978         if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
979                 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
980
981         return rc;
982 }
983
984 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
985                                              struct qed_ll2_rx_queue *p_rx,
986                                              struct qed_ll2_rx_packet *p_curp)
987 {
988         struct qed_ll2_rx_packet *p_posting_packet = NULL;
989         struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
990         bool b_notify_fw = false;
991         u16 bd_prod, cq_prod;
992
993         /* This handles the flushing of already posted buffers */
994         while (!list_empty(&p_rx->posting_descq)) {
995                 p_posting_packet = list_first_entry(&p_rx->posting_descq,
996                                                     struct qed_ll2_rx_packet,
997                                                     list_entry);
998                 list_move_tail(&p_posting_packet->list_entry,
999                                &p_rx->active_descq);
1000                 b_notify_fw = true;
1001         }
1002
1003         /* This handles the supplied packet [if there is one] */
1004         if (p_curp) {
1005                 list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1006                 b_notify_fw = true;
1007         }
1008
1009         if (!b_notify_fw)
1010                 return;
1011
1012         bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1013         cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1014         rx_prod.bd_prod = cpu_to_le16(bd_prod);
1015         rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1016
1017         /* Make sure chain element is updated before ringing the doorbell */
1018         dma_wmb();
1019
1020         DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1021 }
1022
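/* Hand a caller-owned Rx buffer to the connection: grab a free descriptor,
 * fill a BD on the Rx chain with the buffer's DMA address, and either queue
 * it for a later flush or update the firmware producers immediately,
 * depending on notify_fw.
 */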
1023 int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1024                            u8 connection_handle,
1025                            dma_addr_t addr,
1026                            u16 buf_len, void *cookie, u8 notify_fw)
1027 {
1028         struct core_rx_bd_with_buff_len *p_curb = NULL;
1029         struct qed_ll2_rx_packet *p_curp = NULL;
1030         struct qed_ll2_info *p_ll2_conn;
1031         struct qed_ll2_rx_queue *p_rx;
1032         unsigned long flags;
1033         void *p_data;
1034         int rc = 0;
1035
1036         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1037         if (!p_ll2_conn)
1038                 return -EINVAL;
1039         p_rx = &p_ll2_conn->rx_queue;
1040
1041         spin_lock_irqsave(&p_rx->lock, flags);
1042         if (!list_empty(&p_rx->free_descq))
1043                 p_curp = list_first_entry(&p_rx->free_descq,
1044                                           struct qed_ll2_rx_packet, list_entry);
1045         if (p_curp) {
1046                 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1047                     qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1048                         p_data = qed_chain_produce(&p_rx->rxq_chain);
1049                         p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1050                         qed_chain_produce(&p_rx->rcq_chain);
1051                 }
1052         }
1053
1054         /* If we're lacking entries, let's try to flush buffers to FW */
1055         if (!p_curp || !p_curb) {
1056                 rc = -EBUSY;
1057                 p_curp = NULL;
1058                 goto out_notify;
1059         }
1060
1061         /* We have an Rx packet we can fill */
1062         DMA_REGPAIR_LE(p_curb->addr, addr);
1063         p_curb->buff_length = cpu_to_le16(buf_len);
1064         p_curp->rx_buf_addr = addr;
1065         p_curp->cookie = cookie;
1066         p_curp->rxq_bd = p_curb;
1067         p_curp->buf_length = buf_len;
1068         list_del(&p_curp->list_entry);
1069
1070         /* Check if we only want to enqueue this packet without informing FW */
1071         if (!notify_fw) {
1072                 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1073                 goto out;
1074         }
1075
1076 out_notify:
1077         qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1078 out:
1079         spin_unlock_irqrestore(&p_rx->lock, flags);
1080         return rc;
1081 }
1082
1083 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1084                                           struct qed_ll2_tx_queue *p_tx,
1085                                           struct qed_ll2_tx_packet *p_curp,
1086                                           u8 num_of_bds,
1087                                           dma_addr_t first_frag,
1088                                           u16 first_frag_len, void *p_cookie,
1089                                           u8 notify_fw)
1090 {
1091         list_del(&p_curp->list_entry);
1092         p_curp->cookie = p_cookie;
1093         p_curp->bd_used = num_of_bds;
1094         p_curp->notify_fw = notify_fw;
1095         p_tx->cur_send_packet = p_curp;
1096         p_tx->cur_send_frag_num = 0;
1097
1098         p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1099         p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1100         p_tx->cur_send_frag_num++;
1101 }
1102
1103 static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1104                                              struct qed_ll2_info *p_ll2,
1105                                              struct qed_ll2_tx_packet *p_curp,
1106                                              u8 num_of_bds,
1107                                              enum core_tx_dest tx_dest,
1108                                              u16 vlan,
1109                                              u8 bd_flags,
1110                                              u16 l4_hdr_offset_w,
1111                                              enum core_roce_flavor_type type,
1112                                              dma_addr_t first_frag,
1113                                              u16 first_frag_len)
1114 {
1115         struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1116         u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1117         struct core_tx_bd *start_bd = NULL;
1118         u16 frag_idx;
1119
1120         start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1121         start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1122         SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1123                   cpu_to_le16(l4_hdr_offset_w));
1124         SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1125         start_bd->bd_flags.as_bitfield = bd_flags;
1126         start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1127             CORE_TX_BD_FLAGS_START_BD_SHIFT;
1128         SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1129         SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
1130         DMA_REGPAIR_LE(start_bd->addr, first_frag);
1131         start_bd->nbytes = cpu_to_le16(first_frag_len);
1132
1133         DP_VERBOSE(p_hwfn,
1134                    (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1135                    "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1136                    p_ll2->queue_id,
1137                    p_ll2->cid,
1138                    p_ll2->conn_type,
1139                    prod_idx,
1140                    first_frag_len,
1141                    num_of_bds,
1142                    le32_to_cpu(start_bd->addr.hi),
1143                    le32_to_cpu(start_bd->addr.lo));
1144
1145         if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1146                 return;
1147
1148         /* Need to provide the packet with additional BDs for frags */
1149         for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1150              frag_idx < num_of_bds; frag_idx++) {
1151                 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1152
1153                 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1154                 (*p_bd)->bd_flags.as_bitfield = 0;
1155                 (*p_bd)->bitfield1 = 0;
1156                 (*p_bd)->bitfield0 = 0;
1157                 p_curp->bds_set[frag_idx].tx_frag = 0;
1158                 p_curp->bds_set[frag_idx].frag_len = 0;
1159         }
1160 }
1161
1162 /* This should be called while the Txq spinlock is being held */
1163 static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1164                                      struct qed_ll2_info *p_ll2_conn)
1165 {
1166         bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1167         struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1168         struct qed_ll2_tx_packet *p_pkt = NULL;
1169         struct core_db_data db_msg = { 0, 0, 0 };
1170         u16 bd_prod;
1171
1172         /* If there are missing BDs, don't do anything now */
1173         if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1174             p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1175                 return;
1176
1177         /* Push the current packet to the list and clean after it */
1178         list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1179                       &p_ll2_conn->tx_queue.sending_descq);
1180         p_ll2_conn->tx_queue.cur_send_packet = NULL;
1181         p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1182
1183         /* Notify FW of packet only if requested to */
1184         if (!b_notify)
1185                 return;
1186
1187         bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1188
1189         while (!list_empty(&p_tx->sending_descq)) {
1190                 p_pkt = list_first_entry(&p_tx->sending_descq,
1191                                          struct qed_ll2_tx_packet, list_entry);
1192                 if (!p_pkt)
1193                         break;
1194
1195                 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1196         }
1197
1198         SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1199         SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1200         SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1201                   DQ_XCM_CORE_TX_BD_PROD_CMD);
1202         db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1203         db_msg.spq_prod = cpu_to_le16(bd_prod);
1204
1205         /* Make sure the BDs data is updated before ringing the doorbell */
1206         wmb();
1207
1208         DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1209
1210         DP_VERBOSE(p_hwfn,
1211                    (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1212                    "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1213                    p_ll2_conn->queue_id,
1214                    p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
1215 }
1216
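/* Start transmission of a packet made of num_of_bds fragments: the first
 * fragment is described here, the remaining ones are supplied afterwards
 * with qed_ll2_set_fragment_of_tx_packet(), and the doorbell is rung only
 * once all fragments are in place. A hedged two-fragment sketch (addresses,
 * lengths and the vlan/flags values are assumptions):
 *
 *	qed_ll2_prepare_tx_packet(p_hwfn, handle, 2, 0, 0, 0,
 *				  QED_LL2_ROCE, frag0_phys, frag0_len,
 *				  cookie, 1);
 *	qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *					  frag1_phys, frag1_len);
 */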
1217 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1218                               u8 connection_handle,
1219                               u8 num_of_bds,
1220                               u16 vlan,
1221                               u8 bd_flags,
1222                               u16 l4_hdr_offset_w,
1223                               enum qed_ll2_roce_flavor_type qed_roce_flavor,
1224                               dma_addr_t first_frag,
1225                               u16 first_frag_len, void *cookie, u8 notify_fw)
1226 {
1227         struct qed_ll2_tx_packet *p_curp = NULL;
1228         struct qed_ll2_info *p_ll2_conn = NULL;
1229         enum core_roce_flavor_type roce_flavor;
1230         struct qed_ll2_tx_queue *p_tx;
1231         struct qed_chain *p_tx_chain;
1232         unsigned long flags;
1233         int rc = 0;
1234
1235         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1236         if (!p_ll2_conn)
1237                 return -EINVAL;
1238         p_tx = &p_ll2_conn->tx_queue;
1239         p_tx_chain = &p_tx->txq_chain;
1240
1241         if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1242                 return -EIO;
1243
1244         spin_lock_irqsave(&p_tx->lock, flags);
1245         if (p_tx->cur_send_packet) {
1246                 rc = -EEXIST;
1247                 goto out;
1248         }
1249
1250         /* Get entry, but only if we have tx elements for it */
1251         if (!list_empty(&p_tx->free_descq))
1252                 p_curp = list_first_entry(&p_tx->free_descq,
1253                                           struct qed_ll2_tx_packet, list_entry);
1254         if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1255                 p_curp = NULL;
1256
1257         if (!p_curp) {
1258                 rc = -EBUSY;
1259                 goto out;
1260         }
1261
1262         if (qed_roce_flavor == QED_LL2_ROCE) {
1263                 roce_flavor = CORE_ROCE;
1264         } else if (qed_roce_flavor == QED_LL2_RROCE) {
1265                 roce_flavor = CORE_RROCE;
1266         } else {
1267                 rc = -EINVAL;
1268                 goto out;
1269         }
1270
1271         /* Prepare packet and BD, and perhaps send a doorbell to FW */
1272         qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1273                                       num_of_bds, first_frag,
1274                                       first_frag_len, cookie, notify_fw);
1275         qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
1276                                          num_of_bds, CORE_TX_DEST_NW,
1277                                          vlan, bd_flags, l4_hdr_offset_w,
1278                                          roce_flavor,
1279                                          first_frag, first_frag_len);
1280
1281         qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1282
1283 out:
1284         spin_unlock_irqrestore(&p_tx->lock, flags);
1285         return rc;
1286 }
1287
1288 int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1289                                       u8 connection_handle,
1290                                       dma_addr_t addr, u16 nbytes)
1291 {
1292         struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1293         struct qed_ll2_info *p_ll2_conn = NULL;
1294         u16 cur_send_frag_num = 0;
1295         struct core_tx_bd *p_bd;
1296         unsigned long flags;
1297
1298         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1299         if (!p_ll2_conn)
1300                 return -EINVAL;
1301
1302         if (!p_ll2_conn->tx_queue.cur_send_packet)
1303                 return -EINVAL;
1304
1305         p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1306         cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1307
1308         if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1309                 return -EINVAL;
1310
1311         /* Fill the BD information, and possibly notify FW */
1312         p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1313         DMA_REGPAIR_LE(p_bd->addr, addr);
1314         p_bd->nbytes = cpu_to_le16(nbytes);
1315         p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1316         p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1317
1318         p_ll2_conn->tx_queue.cur_send_frag_num++;
1319
1320         spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1321         qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1322         spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1323
1324         return 0;
1325 }
1326
1327 int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1328 {
1329         struct qed_ll2_info *p_ll2_conn = NULL;
1330         int rc = -EINVAL;
1331
1332         p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1333         if (!p_ll2_conn)
1334                 return -EINVAL;
1335
1336         /* Stop Tx & Rx of connection, if needed */
1337         if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1338                 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1339                 if (rc)
1340                         return rc;
1341                 qed_ll2_txq_flush(p_hwfn, connection_handle);
1342         }
1343
1344         if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1345                 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1346                 if (rc)
1347                         return rc;
1348                 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1349         }
1350
1351         return rc;
1352 }
1353
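/* Release all resources of a terminated connection: unregister the RX/TX
 * status-block callbacks, free the descriptor arrays and chains, release the
 * connection's CID and mark the entry inactive so it can be re-acquired.
 */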
1354 void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1355 {
1356         struct qed_ll2_info *p_ll2_conn = NULL;
1357
1358         p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1359         if (!p_ll2_conn)
1360                 return;
1361
1362         if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1363                 p_ll2_conn->rx_queue.b_cb_registred = false;
1364                 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1365         }
1366
1367         if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1368                 p_ll2_conn->tx_queue.b_cb_registred = false;
1369                 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1370         }
1371
1372         kfree(p_ll2_conn->tx_queue.descq_array);
1373         qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1374
1375         kfree(p_ll2_conn->rx_queue.descq_array);
1376         qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1377         qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1378
1379         qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1380
1381         mutex_lock(&p_ll2_conn->mutex);
1382         p_ll2_conn->b_active = false;
1383         mutex_unlock(&p_ll2_conn->mutex);
1384 }
1385
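/* Allocate the per-hwfn array of LL2 connection structures; every entry
 * remembers its own index in my_id so it can later be referred to by handle.
 */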
1386 struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1387 {
1388         struct qed_ll2_info *p_ll2_connections;
1389         u8 i;
1390
1391         /* Allocate LL2's set struct */
1392         p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1393                                     sizeof(struct qed_ll2_info), GFP_KERNEL);
1394         if (!p_ll2_connections) {
1395                 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2_info'\n");
1396                 return NULL;
1397         }
1398
1399         for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1400                 p_ll2_connections[i].my_id = i;
1401
1402         return p_ll2_connections;
1403 }
1404
1405 void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1406                    struct qed_ll2_info *p_ll2_connections)
1407 {
1408         int i;
1409
1410         for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1411                 mutex_init(&p_ll2_connections[i].mutex);
1412 }
1413
1414 void qed_ll2_free(struct qed_hwfn *p_hwfn,
1415                   struct qed_ll2_info *p_ll2_connections)
1416 {
1417         kfree(p_ll2_connections);
1418 }
1419
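/* The three helpers below read the per-queue LL2 statistics kept by the
 * TSTORM, USTORM and PSTORM storms from the device's internal RAM through
 * the supplied PTT window.
 */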
1420 static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1421                                 struct qed_ptt *p_ptt,
1422                                 struct qed_ll2_info *p_ll2_conn,
1423                                 struct qed_ll2_stats *p_stats)
1424 {
1425         struct core_ll2_tstorm_per_queue_stat tstats;
1426         u8 qid = p_ll2_conn->queue_id;
1427         u32 tstats_addr;
1428
1429         memset(&tstats, 0, sizeof(tstats));
1430         tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1431                       CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1432         qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1433
1434         p_stats->packet_too_big_discard =
1435                         HILO_64_REGPAIR(tstats.packet_too_big_discard);
1436         p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1437 }
1438
1439 static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1440                                 struct qed_ptt *p_ptt,
1441                                 struct qed_ll2_info *p_ll2_conn,
1442                                 struct qed_ll2_stats *p_stats)
1443 {
1444         struct core_ll2_ustorm_per_queue_stat ustats;
1445         u8 qid = p_ll2_conn->queue_id;
1446         u32 ustats_addr;
1447
1448         memset(&ustats, 0, sizeof(ustats));
1449         ustats_addr = BAR0_MAP_REG_USDM_RAM +
1450                       CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1451         qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1452
1453         p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1454         p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1455         p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1456         p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1457         p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1458         p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1459 }
1460
1461 static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1462                                 struct qed_ptt *p_ptt,
1463                                 struct qed_ll2_info *p_ll2_conn,
1464                                 struct qed_ll2_stats *p_stats)
1465 {
1466         struct core_ll2_pstorm_per_queue_stat pstats;
1467         u8 stats_id = p_ll2_conn->tx_stats_id;
1468         u32 pstats_addr;
1469
1470         memset(&pstats, 0, sizeof(pstats));
1471         pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1472                       CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1473         qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1474
1475         p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1476         p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1477         p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1478         p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1479         p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1480         p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1481 }
1482
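/* Gather the RX (tstorm/ustorm) statistics for a connection and, when TX
 * statistics are enabled for it (tx_stats_en), the pstorm TX counters as
 * well. Requires a free PTT window.
 */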
1483 int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1484                       u8 connection_handle, struct qed_ll2_stats *p_stats)
1485 {
1486         struct qed_ll2_info *p_ll2_conn = NULL;
1487         struct qed_ptt *p_ptt;
1488
1489         memset(p_stats, 0, sizeof(*p_stats));
1490
1491         if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1492             !p_hwfn->p_ll2_info)
1493                 return -EINVAL;
1494
1495         p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
1496
1497         p_ptt = qed_ptt_acquire(p_hwfn);
1498         if (!p_ptt) {
1499                 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1500                 return -EINVAL;
1501         }
1502
1503         _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1504         _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1505         if (p_ll2_conn->tx_stats_en)
1506                 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1507
1508         qed_ptt_release(p_hwfn, p_ptt);
1509         return 0;
1510 }
1511
1512 static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
1513                                     const struct qed_ll2_cb_ops *ops,
1514                                     void *cookie)
1515 {
1516         cdev->ll2->cbs = ops;
1517         cdev->ll2->cb_cookie = cookie;
1518 }
1519
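/* Bring up the protocol-driver facing LL2 interface: pre-allocate
 * QED_LL2_RX_SIZE receive buffers, acquire and establish an LL2 connection
 * on the leading hwfn, post the Rx buffers to FW and install an LLH MAC
 * filter for the requested address.
 */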
1520 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
1521 {
1522         struct qed_ll2_info ll2_info;
1523         struct qed_ll2_buffer *buffer, *tmp_buffer;
1524         enum qed_ll2_conn_type conn_type;
1525         struct qed_ptt *p_ptt;
1526         int rc, i;
1527
1528         /* Initialize LL2 locks & lists */
1529         INIT_LIST_HEAD(&cdev->ll2->list);
1530         spin_lock_init(&cdev->ll2->lock);
1531         cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
1532                              L1_CACHE_BYTES + params->mtu;
1533         cdev->ll2->frags_mapped = params->frags_mapped;
1534
1535         /* Allocate memory for LL2 Rx buffers */
1536         DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
1537                 cdev->ll2->rx_size);
1538         for (i = 0; i < QED_LL2_RX_SIZE; i++) {
1539                 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1540                 if (!buffer) {
1541                         DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
1542                         goto fail;
1543                 }
1544
1545                 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
1546                                           &buffer->phys_addr);
1547                 if (rc) {
1548                         kfree(buffer);
1549                         goto fail;
1550                 }
1551
1552                 list_add_tail(&buffer->list, &cdev->ll2->list);
1553         }
1554
1555         switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
1556         case QED_PCI_ISCSI:
1557                 conn_type = QED_LL2_TYPE_ISCSI;
1558                 break;
1559         case QED_PCI_ETH_ROCE:
1560                 conn_type = QED_LL2_TYPE_ROCE;
1561                 break;
1562         default:
1563                 conn_type = QED_LL2_TYPE_TEST;
1564         }
1565
1566         /* Prepare the temporary ll2 information */
1567         memset(&ll2_info, 0, sizeof(ll2_info));
1568         ll2_info.conn_type = conn_type;
1569         ll2_info.mtu = params->mtu;
1570         ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
1571         ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
1572         ll2_info.tx_tc = 0;
1573         ll2_info.tx_dest = CORE_TX_DEST_NW;
1574         ll2_info.gsi_enable = 1;
1575
1576         rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
1577                                         QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
1578                                         &cdev->ll2->handle);
1579         if (rc) {
1580                 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
1581                 goto fail;
1582         }
1583
1584         rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
1585                                           cdev->ll2->handle);
1586         if (rc) {
1587                 DP_INFO(cdev, "Failed to establish LL2 connection\n");
1588                 goto release_fail;
1589         }
1590
1591         /* Post all Rx buffers to FW */
1592         spin_lock_bh(&cdev->ll2->lock);
1593         list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
1594                 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
1595                                             cdev->ll2->handle,
1596                                             buffer->phys_addr, 0, buffer, 1);
1597                 if (rc) {
1598                         DP_INFO(cdev,
1599                                 "Failed to post an Rx buffer; Deleting it\n");
1600                         dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
1601                                          cdev->ll2->rx_size, DMA_FROM_DEVICE);
1602                         kfree(buffer->data);
1603                         list_del(&buffer->list);
1604                         kfree(buffer);
1605                 } else {
1606                         cdev->ll2->rx_cnt++;
1607                 }
1608         }
1609         spin_unlock_bh(&cdev->ll2->lock);
1610
1611         if (!cdev->ll2->rx_cnt) {
1612                 DP_INFO(cdev, "Failed to post even a single Rx buffer\n");
1613                 goto release_terminate;
1614         }
1615
1616         if (!is_valid_ether_addr(params->ll2_mac_address)) {
1617                 DP_INFO(cdev, "Invalid Ethernet address\n");
1618                 goto release_terminate;
1619         }
1620
1621         p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1622         if (!p_ptt) {
1623                 DP_INFO(cdev, "Failed to acquire PTT\n");
1624                 goto release_terminate;
1625         }
1626
1627         rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1628                                     params->ll2_mac_address);
1629         qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1630         if (rc) {
1631                 DP_ERR(cdev, "Failed to add an LLH MAC filter\n");
1632                 goto release_terminate_all;
1633         }
1634
1635         ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
1636
1637         return 0;
1638
1639 release_terminate_all:
1640
1641 release_terminate:
1642         qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1643 release_fail:
1644         qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1645 fail:
1646         qed_ll2_kill_buffers(cdev);
1647         cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1648         return -EINVAL;
1649 }
1650
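/* Tear down what qed_ll2_start() set up: remove the LLH MAC filter,
 * terminate and release the LL2 connection and free any Rx buffers that are
 * still owned by the driver.
 */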
1651 static int qed_ll2_stop(struct qed_dev *cdev)
1652 {
1653         struct qed_ptt *p_ptt;
1654         int rc;
1655
1656         if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
1657                 return 0;
1658
1659         p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1660         if (!p_ptt) {
1661                 DP_INFO(cdev, "Failed to acquire PTT\n");
1662                 goto fail;
1663         }
1664
1665         qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1666                                   cdev->ll2_mac_address);
1667         qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1668         eth_zero_addr(cdev->ll2_mac_address);
1669
1670         rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
1671                                           cdev->ll2->handle);
1672         if (rc)
1673                 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
1674
1675         qed_ll2_kill_buffers(cdev);
1676
1677         qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1678         cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1679
1680         return rc;
1681 fail:
1682         return -EINVAL;
1683 }
1684
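/* Transmit an skb over the LL2 connection: map the linear part, prepare the
 * first BD (requesting VLAN insertion and IP csum offload where applicable),
 * then map and chain each page fragment as an additional BD. The linear
 * mapping is released and the skb freed at TX-completion time (see
 * qed_ll2b_complete_tx_packet()).
 */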
1685 static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
1686 {
1687         const skb_frag_t *frag;
1688         int rc = -EINVAL, i;
1689         dma_addr_t mapping;
1690         u16 vlan = 0;
1691         u8 flags = 0;
1692
1693         if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
1694                 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
1695                 return -EINVAL;
1696         }
1697
1698         if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
1699                 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1700                        1 + skb_shinfo(skb)->nr_frags);
1701                 return -EINVAL;
1702         }
1703
1704         mapping = dma_map_single(&cdev->pdev->dev, skb->data,
1705                                  skb->len, DMA_TO_DEVICE);
1706         if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
1707                 DP_NOTICE(cdev, "SKB mapping failed\n");
1708                 return -EINVAL;
1709         }
1710
1711         /* Request HW to calculate IP csum */
1712         if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
1713               ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1714                 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
1715
1716         if (skb_vlan_tag_present(skb)) {
1717                 vlan = skb_vlan_tag_get(skb);
1718                 flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
1719         }
1720
1721         rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
1722                                        cdev->ll2->handle,
1723                                        1 + skb_shinfo(skb)->nr_frags,
1724                                        vlan, flags, 0, 0 /* RoCE FLAVOR */,
1725                                        mapping, skb->len, skb, 1);
1726         if (rc)
1727                 goto err;
1728
1729         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1730                 frag = &skb_shinfo(skb)->frags[i];
1731                 if (!cdev->ll2->frags_mapped) {
1732                         mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
1733                                                    skb_frag_size(frag),
1734                                                    DMA_TO_DEVICE);
1735
1736                         if (unlikely(dma_mapping_error(&cdev->pdev->dev,
1737                                                        mapping))) {
1738                                 DP_NOTICE(cdev,
1739                                           "Unable to map frag - dropping packet\n");
1740                                 rc = -ENOMEM;
1741                                 goto err;
1742                         }
1743                 } else {
1744                         mapping = page_to_phys(skb_frag_page(frag)) |
1745                             frag->page_offset;
1746                 }
1747
1748                 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
1749                                                        cdev->ll2->handle,
1750                                                        mapping,
1751                                                        skb_frag_size(frag));
1752
1753                 /* If this fails there is not much we can do: a partial packet
1754                  * was posted; we can't free memory and must wait for completion.
1755                  */
1756                 if (rc)
1757                         goto err2;
1758         }
1759
1760         return 0;
1761
1762 err:
1763         dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
1764
1765 err2:
1766         return rc;
1767 }
1768
1769 static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
1770 {
1771         if (!cdev->ll2)
1772                 return -EINVAL;
1773
1774         return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
1775                                  cdev->ll2->handle, stats);
1776 }
1777
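/* LL2 ops table exposed to upper-layer drivers through the qed_ll2_if.h API. */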
1778 const struct qed_ll2_ops qed_ll2_ops_pass = {
1779         .start = &qed_ll2_start,
1780         .stop = &qed_ll2_stop,
1781         .start_xmit = &qed_ll2_start_xmit,
1782         .register_cb_ops = &qed_ll2_register_cb_ops,
1783         .get_stats = &qed_ll2_stats,
1784 };
1785
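/* cdev-level allocation/teardown of the LL2 bookkeeping structure
 * (struct qed_cb_ll2_info) used by the ops above.
 */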
1786 int qed_ll2_alloc_if(struct qed_dev *cdev)
1787 {
1788         cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
1789         return cdev->ll2 ? 0 : -ENOMEM;
1790 }
1791
1792 void qed_ll2_dealloc_if(struct qed_dev *cdev)
1793 {
1794         kfree(cdev->ll2);
1795         cdev->ll2 = NULL;
1796 }