2 * Copyright (C) 2015 Cavium, Inc.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
10 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
19 #include "nicvf_queues.h"
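/* Commit the page references accumulated while receive buffers were being
 * carved out of the current page (nic->rb_page), so the page is not freed
 * while those buffers are still in use.
 */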
21 static void nicvf_get_page(struct nicvf *nic)
23 if (!nic->rb_pageref || !nic->rb_page)
26 page_ref_add(nic->rb_page, nic->rb_pageref);
30 /* Poll a register for a specific value */
31 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
32 u64 reg, int bit_pos, int bits, int val)
38 bit_mask = (1ULL << bits) - 1;
39 bit_mask = (bit_mask << bit_pos);
42 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
43 if (((reg_val & bit_mask) >> bit_pos) == val)
45 usleep_range(1000, 2000);
48 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
52 /* Allocate memory for a queue's descriptors */
53 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
54 int q_len, int desc_size, int align_bytes)
57 dmem->size = (desc_size * q_len) + align_bytes;
58 /* Save address, need it while freeing */
59 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
60 &dmem->dma, GFP_KERNEL);
61 if (!dmem->unalign_base)
64 /* Align the memory address to 'align_bytes' */
65 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
66 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
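/* Worked example (assuming NICVF_ALIGNED_ADDR() rounds up to the next
 * 'align_bytes' boundary): with dma = 0x1010 and align_bytes = 0x200,
 * phys_base = 0x1200 and base = unalign_base + 0x1F0, i.e. the virtual
 * and DMA views are advanced by the same padding.
 */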
70 /* Free queue's descriptor memory */
71 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
76 dma_free_coherent(&nic->pdev->dev, dmem->size,
77 dmem->unalign_base, dmem->dma);
78 dmem->unalign_base = NULL;
82 /* Allocate a buffer for packet reception.
83 * HW returns the memory address where the packet is DMA'ed, not a pointer
84 * into the RBDR ring, so save the buffer address at the start of the fragment
85 * and align the start address to a cache-aligned address.
87 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
88 u32 buf_len, u64 **rbuf)
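/* With 4KB pages, allocate a costly-order compound page (order 3, i.e. 32KB)
 * so that several receive buffers can be carved out of one allocation; on
 * architectures with larger base pages a single order-0 page is enough.
 */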
90 int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
92 /* Check if the request can be accommodated in the previously allocated page */
94 ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
102 /* Allocate a new page */
104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 this_cpu_inc(nic->pnicvf->drv_stats->
108 rcv_buffer_alloc_failures);
111 nic->rb_page_offset = 0;
115 *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
116 nic->rb_page_offset += buf_len;
121 /* Build skb around receive buffer */
122 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
128 data = phys_to_virt(rb_ptr);
130 /* Now build an skb to hand to the stack */
131 skb = build_skb(data, RCV_FRAG_LEN);
133 put_page(virt_to_page(data));
141 /* Allocate RBDR ring and populate receive buffers */
142 static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
143 int ring_len, int buf_size)
147 struct rbdr_entry_t *desc;
150 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
151 sizeof(struct rbdr_entry_t),
152 NICVF_RCV_BUF_ALIGN_BYTES);
156 rbdr->desc = rbdr->dmem.base;
157 /* Buffer size has to be in multiples of 128 bytes */
158 rbdr->dma_size = buf_size;
160 rbdr->thresh = RBDR_THRESH;
163 for (idx = 0; idx < ring_len; idx++) {
164 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
169 desc = GET_RBDR_DESC(rbdr, idx);
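/* Only the cache-line-aligned upper bits of the buffer address are kept;
 * the address is stored right-shifted by NICVF_RCV_BUF_ALIGN and shifted
 * back when the buffer is freed.
 */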
170 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
178 /* Free RBDR ring and its receive buffers */
179 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
183 struct rbdr_entry_t *desc;
188 rbdr->enable = false;
189 if (!rbdr->dmem.base)
196 while (head != tail) {
197 desc = GET_RBDR_DESC(rbdr, head);
198 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
199 put_page(virt_to_page(phys_to_virt(buf_addr)));
201 head &= (rbdr->dmem.q_len - 1);
203 /* Release the buffer of the tail desc */
204 desc = GET_RBDR_DESC(rbdr, tail);
205 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
206 put_page(virt_to_page(phys_to_virt(buf_addr)));
209 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
212 /* Refill receive buffer descriptors with new buffers.
214 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
216 struct queue_set *qs = nic->qs;
217 int rbdr_idx = qs->rbdr_cnt;
221 struct rbdr_entry_t *desc;
229 rbdr = &qs->rbdr[rbdr_idx];
230 /* Check if it's enabled */
234 /* Get the number of descriptors to be refilled */
235 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
237 /* The doorbell can be rung with at most ring size minus 1 descriptors */
238 if (qcount >= (qs->rbdr_len - 1))
241 refill_rb_cnt = qs->rbdr_len - qcount - 1;
243 /* Start filling descs from tail */
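/* The HW tail register appears to hold a byte offset; each RBDR entry is
 * 8 bytes, hence the shift by 3 to get a descriptor index.
 */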
244 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
245 while (refill_rb_cnt) {
247 tail &= (rbdr->dmem.q_len - 1);
249 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
252 desc = GET_RBDR_DESC(rbdr, tail);
253 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
260 /* make sure all memory stores are done before ringing doorbell */
263 /* Check if buffer allocation failed */
265 nic->rb_alloc_fail = true;
267 nic->rb_alloc_fail = false;
270 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
273 /* Re-enable RBDR interrupts only if buffer allocation succeeded */
274 if (!nic->rb_alloc_fail && rbdr->enable &&
275 netif_running(nic->pnicvf->netdev))
276 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
282 /* Allocate receive buffers in non-atomic context for a better chance of success */
283 void nicvf_rbdr_work(struct work_struct *work)
285 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
287 nicvf_refill_rbdr(nic, GFP_KERNEL);
288 if (nic->rb_alloc_fail)
289 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
291 nic->rb_work_scheduled = false;
294 /* In softirq context, allocate receive buffers in atomic mode */
295 void nicvf_rbdr_task(unsigned long data)
297 struct nicvf *nic = (struct nicvf *)data;
299 nicvf_refill_rbdr(nic, GFP_ATOMIC);
300 if (nic->rb_alloc_fail) {
301 nic->rb_work_scheduled = true;
302 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
306 /* Initialize completion queue */
307 static int nicvf_init_cmp_queue(struct nicvf *nic,
308 struct cmp_queue *cq, int q_len)
312 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
313 NICVF_CQ_BASE_ALIGN_BYTES);
317 cq->desc = cq->dmem.base;
318 cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
319 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
324 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
331 nicvf_free_q_desc_mem(nic, &cq->dmem);
334 /* Initialize transmit queue */
335 static int nicvf_init_snd_queue(struct nicvf *nic,
336 struct snd_queue *sq, int q_len)
340 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
341 NICVF_SQ_BASE_ALIGN_BYTES);
345 sq->desc = sq->dmem.base;
346 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
351 atomic_set(&sq->free_cnt, q_len - 1);
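/* One descriptor is always left unused so that a full queue can be told
 * apart from an empty one, hence free_cnt starts at q_len - 1.
 */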
352 sq->thresh = SND_QUEUE_THRESH;
354 /* Preallocate memory for TSO segment's header */
355 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
356 q_len * TSO_HEADER_SIZE,
357 &sq->tso_hdrs_phys, GFP_KERNEL);
364 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
374 dma_free_coherent(&nic->pdev->dev,
375 sq->dmem.q_len * TSO_HEADER_SIZE,
376 sq->tso_hdrs, sq->tso_hdrs_phys);
378 /* Free pending skbs in the queue */
380 while (sq->head != sq->tail) {
381 skb = (struct sk_buff *)sq->skbuff[sq->head];
383 dev_kfree_skb_any(skb);
385 sq->head &= (sq->dmem.q_len - 1);
388 nicvf_free_q_desc_mem(nic, &sq->dmem);
391 static void nicvf_reclaim_snd_queue(struct nicvf *nic,
392 struct queue_set *qs, int qidx)
394 /* Disable send queue */
395 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
396 /* Check if SQ is stopped */
397 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
399 /* Reset send queue */
400 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
403 static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
404 struct queue_set *qs, int qidx)
406 union nic_mbx mbx = {};
408 /* Make sure all packets in the pipeline are written back into mem */
409 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
410 nicvf_send_msg_to_pf(nic, &mbx);
413 static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
414 struct queue_set *qs, int qidx)
416 /* Disable timer threshold (doesn't get reset upon CQ reset) */
417 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
418 /* Disable completion queue */
419 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
420 /* Reset completion queue */
421 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
424 static void nicvf_reclaim_rbdr(struct nicvf *nic,
425 struct rbdr *rbdr, int qidx)
430 /* Save head and tail pointers for freeing up buffers */
431 rbdr->head = nicvf_queue_reg_read(nic,
432 NIC_QSET_RBDR_0_1_HEAD,
434 rbdr->tail = nicvf_queue_reg_read(nic,
435 NIC_QSET_RBDR_0_1_TAIL,
438 /* If RBDR FIFO is in 'FAIL' state then do a reset first
441 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
442 if (((fifo_state >> 62) & 0x03) == 0x3)
443 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
444 qidx, NICVF_RBDR_RESET);
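/* Disable the ring, then wait until the two 32-bit halves of the prefetch
 * status register match -- presumably meaning all prefetched descriptors
 * have been consumed -- before issuing the final reset below.
 */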
447 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
448 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
451 tmp = nicvf_queue_reg_read(nic,
452 NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
454 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
456 usleep_range(1000, 2000);
459 netdev_err(nic->netdev,
460 "Failed polling on prefetch status\n");
464 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
465 qidx, NICVF_RBDR_RESET);
467 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
469 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
470 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
474 void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
479 rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
481 /* Enable first VLAN stripping */
482 if (features & NETIF_F_HW_VLAN_CTAG_RX)
483 rq_cfg |= (1ULL << 25);
485 rq_cfg &= ~(1ULL << 25);
486 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
488 /* Configure Secondary Qsets, if any */
489 for (sqs = 0; sqs < nic->sqs_count; sqs++)
490 if (nic->snicvf[sqs])
491 nicvf_queue_reg_write(nic->snicvf[sqs],
492 NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
495 static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
497 union nic_mbx mbx = {};
499 /* Reset all RQ/SQ and VF stats */
500 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
501 mbx.reset_stat.rx_stat_mask = 0x3FFF;
502 mbx.reset_stat.tx_stat_mask = 0x1F;
503 mbx.reset_stat.rq_stat_mask = 0xFFFF;
504 mbx.reset_stat.sq_stat_mask = 0xFFFF;
505 nicvf_send_msg_to_pf(nic, &mbx);
508 /* Configures receive queue */
509 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
510 int qidx, bool enable)
512 union nic_mbx mbx = {};
513 struct rcv_queue *rq;
514 struct rq_cfg rq_cfg;
519 /* Disable receive queue */
520 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
523 nicvf_reclaim_rcv_queue(nic, qs, qidx);
527 rq->cq_qs = qs->vnic_id;
529 rq->start_rbdr_qs = qs->vnic_id;
530 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
531 rq->cont_rbdr_qs = qs->vnic_id;
532 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
533 /* All writes of RBDR data are to be loaded into the L2 cache as well */
536 /* Send a mailbox msg to PF to config RQ */
537 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
538 mbx.rq.qs_num = qs->vnic_id;
539 mbx.rq.rq_num = qidx;
540 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
541 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
542 (rq->cont_qs_rbdr_idx << 8) |
543 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
544 nicvf_send_msg_to_pf(nic, &mbx);
546 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
547 mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
548 nicvf_send_msg_to_pf(nic, &mbx);
551 * Enable CQ drop to reserve sufficient CQEs for all tx packets
553 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
554 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
555 nicvf_send_msg_to_pf(nic, &mbx);
557 if (!nic->sqs_mode && (qidx == 0)) {
558 /* Enable checking L3/L4 length and TCP/UDP checksums */
559 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
560 (BIT(24) | BIT(23) | BIT(21)));
561 nicvf_config_vlan_stripping(nic, nic->netdev->features);
564 /* Enable Receive queue */
565 memset(&rq_cfg, 0, sizeof(struct rq_cfg));
568 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
571 /* Configures completion queue */
572 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
573 int qidx, bool enable)
575 struct cmp_queue *cq;
576 struct cq_cfg cq_cfg;
582 nicvf_reclaim_cmp_queue(nic, qs, qidx);
586 /* Reset completion queue */
587 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
592 spin_lock_init(&cq->lock);
593 /* Set completion queue base address */
594 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
595 qidx, (u64)(cq->dmem.phys_base));
597 /* Enable Completion queue */
598 memset(&cq_cfg, 0, sizeof(struct cq_cfg));
602 cq_cfg.qsize = CMP_QSIZE;
604 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
606 /* Set threshold value for interrupt generation */
607 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
608 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
609 qidx, CMP_QUEUE_TIMER_THRESH);
612 /* Configures transmit queue */
613 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
614 int qidx, bool enable)
616 union nic_mbx mbx = {};
617 struct snd_queue *sq;
618 struct sq_cfg sq_cfg;
624 nicvf_reclaim_snd_queue(nic, qs, qidx);
628 /* Reset send queue */
629 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
631 sq->cq_qs = qs->vnic_id;
634 /* Send a mailbox msg to PF to config SQ */
635 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
636 mbx.sq.qs_num = qs->vnic_id;
637 mbx.sq.sq_num = qidx;
638 mbx.sq.sqs_mode = nic->sqs_mode;
639 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
640 nicvf_send_msg_to_pf(nic, &mbx);
642 /* Set queue base address */
643 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
644 qidx, (u64)(sq->dmem.phys_base));
646 /* Enable send queue & set queue size */
647 memset(&sq_cfg, 0, sizeof(struct sq_cfg));
651 sq_cfg.qsize = SND_QSIZE;
652 sq_cfg.tstmp_bgx_intf = 0;
653 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
655 /* Set threshold value for interrupt generation */
656 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
658 /* Set queue:cpu affinity for better load distribution */
659 if (cpu_online(qidx)) {
660 cpumask_set_cpu(qidx, &sq->affinity_mask);
661 netif_set_xps_queue(nic->netdev,
662 &sq->affinity_mask, qidx);
666 /* Configures receive buffer descriptor ring */
667 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
668 int qidx, bool enable)
671 struct rbdr_cfg rbdr_cfg;
673 rbdr = &qs->rbdr[qidx];
674 nicvf_reclaim_rbdr(nic, rbdr, qidx);
678 /* Set descriptor base address */
679 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
680 qidx, (u64)(rbdr->dmem.phys_base));
682 /* Enable RBDR & set queue size */
683 /* Buffer size should be in multiples of 128 bytes */
684 memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
688 rbdr_cfg.qsize = RBDR_SIZE;
689 rbdr_cfg.avg_con = 0;
690 rbdr_cfg.lines = rbdr->dma_size / 128;
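/* 'lines' therefore expresses the per-buffer size in units of 128-byte
 * cache lines, which is how the hardware expects it.
 */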
691 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
692 qidx, *(u64 *)&rbdr_cfg);
695 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
696 qidx, qs->rbdr_len - 1);
698 /* Set threshold value for interrupt generation */
699 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
700 qidx, rbdr->thresh - 1);
703 /* Requests PF to assign and enable Qset */
704 void nicvf_qset_config(struct nicvf *nic, bool enable)
706 union nic_mbx mbx = {};
707 struct queue_set *qs = nic->qs;
708 struct qs_cfg *qs_cfg;
711 netdev_warn(nic->netdev,
712 "Qset is still not allocated, don't init queues\n");
717 qs->vnic_id = nic->vf_id;
719 /* Send a mailbox msg to PF to config Qset */
720 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
721 mbx.qs.num = qs->vnic_id;
722 mbx.qs.sqs_count = nic->sqs_count;
725 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
731 qs_cfg->vnic = qs->vnic_id;
733 nicvf_send_msg_to_pf(nic, &mbx);
736 static void nicvf_free_resources(struct nicvf *nic)
739 struct queue_set *qs = nic->qs;
741 /* Free receive buffer descriptor ring */
742 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
743 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
745 /* Free completion queue */
746 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
747 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
749 /* Free send queue */
750 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
751 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
754 static int nicvf_alloc_resources(struct nicvf *nic)
757 struct queue_set *qs = nic->qs;
759 /* Alloc receive buffer descriptor ring */
760 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
761 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
766 /* Alloc send queue */
767 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
768 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
772 /* Alloc completion queue */
773 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
774 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
780 nicvf_free_resources(nic);
784 int nicvf_set_qset_resources(struct nicvf *nic)
786 struct queue_set *qs;
788 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
793 /* Set count of each queue */
794 qs->rbdr_cnt = DEFAULT_RBDR_CNT;
795 qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
796 qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
797 qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
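/* Each RQ and SQ posts completions to the CQ with the same index, so
 * max(rq_cnt, sq_cnt) completion queues cover both.
 */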
799 /* Set queue lengths */
800 qs->rbdr_len = RCV_BUF_COUNT;
801 qs->sq_len = SND_QUEUE_LEN;
802 qs->cq_len = CMP_QUEUE_LEN;
804 nic->rx_queues = qs->rq_cnt;
805 nic->tx_queues = qs->sq_cnt;
810 int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
812 bool disable = false;
813 struct queue_set *qs = nic->qs;
820 if (nicvf_alloc_resources(nic))
823 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
824 nicvf_snd_queue_config(nic, qs, qidx, enable);
825 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
826 nicvf_cmp_queue_config(nic, qs, qidx, enable);
827 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
828 nicvf_rbdr_config(nic, qs, qidx, enable);
829 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
830 nicvf_rcv_queue_config(nic, qs, qidx, enable);
832 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
833 nicvf_rcv_queue_config(nic, qs, qidx, disable);
834 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
835 nicvf_rbdr_config(nic, qs, qidx, disable);
836 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
837 nicvf_snd_queue_config(nic, qs, qidx, disable);
838 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
839 nicvf_cmp_queue_config(nic, qs, qidx, disable);
841 nicvf_free_resources(nic);
844 /* Reset RXQ's stats.
845 * SQ's stats will get reset automatically once SQ is reset.
847 nicvf_reset_rcv_queue_stats(nic);
852 /* Get a free descriptor from the SQ
853 * returns descriptor pointer & descriptor number
855 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
860 atomic_sub(desc_cnt, &sq->free_cnt);
861 sq->tail += desc_cnt;
862 sq->tail &= (sq->dmem.q_len - 1);
867 /* Free descriptor back to SQ for future use */
868 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
870 atomic_add(desc_cnt, &sq->free_cnt);
871 sq->head += desc_cnt;
872 sq->head &= (sq->dmem.q_len - 1);
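/* Note: head, tail and free_cnt wrap by masking with (q_len - 1), which
 * relies on the queue length being a power of two.
 */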
875 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
878 qentry &= (sq->dmem.q_len - 1);
882 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
886 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
887 sq_cfg |= NICVF_SQ_EN;
888 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
889 /* Ring doorbell so that H/W restarts processing SQEs */
890 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
893 void nicvf_sq_disable(struct nicvf *nic, int qidx)
897 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
898 sq_cfg &= ~NICVF_SQ_EN;
899 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
902 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
907 struct nicvf *nic = netdev_priv(netdev);
908 struct sq_hdr_subdesc *hdr;
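/* The HW head/tail registers appear to hold byte offsets; each SQ
 * subdescriptor is 16 bytes, hence the shift by 4 to get an index.
 */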
910 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
911 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
912 while (sq->head != head) {
913 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
914 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
915 nicvf_put_sq_desc(sq, 1);
918 skb = (struct sk_buff *)sq->skbuff[sq->head];
920 dev_kfree_skb_any(skb);
921 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
922 atomic64_add(hdr->tot_len,
923 (atomic64_t *)&netdev->stats.tx_bytes);
924 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
928 /* Calculate the number of SQ subdescriptors needed to transmit all
929 * segments of this TSO packet.
930 * Taken from 'Tilera network driver' with a minor modification.
932 static int nicvf_tso_count_subdescs(struct sk_buff *skb)
934 struct skb_shared_info *sh = skb_shinfo(skb);
935 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
936 unsigned int data_len = skb->len - sh_len;
937 unsigned int p_len = sh->gso_size;
938 long f_id = -1; /* id of the current fragment */
939 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
940 long f_used = 0; /* bytes used from the current fragment */
941 long n; /* size of the current piece of payload */
945 for (segment = 0; segment < sh->gso_segs; segment++) {
946 unsigned int p_used = 0;
948 /* One edesc for header and for each piece of the payload. */
949 for (num_edescs++; p_used < p_len; num_edescs++) {
950 /* Advance as needed. */
951 while (f_used >= f_size) {
953 f_size = skb_frag_size(&sh->frags[f_id]);
957 /* Use bytes from the current fragment. */
959 if (n > f_size - f_used)
965 /* The last segment may be less than gso_size. */
967 if (data_len < p_len)
971 /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
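/* Worked example (hypothetical, fully linear skb): every segment needs one
 * gather for its header plus one gather for its single contiguous payload
 * piece, and one SQ_HDR subdesc via '+ gso_segs' -- i.e. 3 subdescs per
 * segment, so gso_segs = 2 yields 6.
 */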
972 return num_edescs + sh->gso_segs;
975 #define POST_CQE_DESC_COUNT 2
977 /* Get the number of SQ descriptors needed to xmit this skb */
978 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
980 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
982 if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
983 subdesc_cnt = nicvf_tso_count_subdescs(skb);
987 /* Dummy descriptors to get TSO pkt completion notification */
988 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
989 subdesc_cnt += POST_CQE_DESC_COUNT;
991 if (skb_shinfo(skb)->nr_frags)
992 subdesc_cnt += skb_shinfo(skb)->nr_frags;
997 /* Add SQ HEADER subdescriptor.
998 * First subdescriptor for every send descriptor.
1001 nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1002 int subdesc_cnt, struct sk_buff *skb, int len)
1005 struct sq_hdr_subdesc *hdr;
1007 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1008 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1009 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1011 if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
1012 /* post_cqe = 0, to avoid HW posting a CQE for every TSO
1013 * segment transmitted on 88xx.
1015 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
1017 sq->skbuff[qentry] = (u64)skb;
1018 /* Enable notification via CQE after processing SQE */
1020 /* No of subdescriptors following this */
1021 hdr->subdesc_cnt = subdesc_cnt;
1025 /* Offload checksum calculation to HW */
1026 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1027 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1028 hdr->l3_offset = skb_network_offset(skb);
1029 hdr->l4_offset = skb_transport_offset(skb);
1031 proto = ip_hdr(skb)->protocol;
1034 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1037 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1040 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1045 if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
1047 hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
1048 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1049 /* For non-tunneled pkts, point this to L2 ethertype */
1050 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
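/* i.e. the 2-byte ethertype field that immediately precedes the IP header */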
1051 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1055 /* SQ GATHER subdescriptor
1056 * Must follow HDR descriptor
1058 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1061 struct sq_gather_subdesc *gather;
1063 qentry &= (sq->dmem.q_len - 1);
1064 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1066 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1067 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1068 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1069 gather->size = size;
1070 gather->addr = data;
1073 /* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
1074 * packet so that a CQE is posted as a notification for transmission of
1077 static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
1078 int tso_sqe, struct sk_buff *skb)
1080 struct sq_imm_subdesc *imm;
1081 struct sq_hdr_subdesc *hdr;
1083 sq->skbuff[qentry] = (u64)skb;
1085 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1086 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1087 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1088 /* Enable notification via CQE after processing SQE */
1090 /* There is no packet to transmit here */
1092 hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
1094 /* Actual TSO header SQE index, needed for cleanup */
1095 hdr->rsvd2 = tso_sqe;
1097 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1098 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
1099 memset(imm, 0, SND_QUEUE_DESC_SIZE);
1100 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
1104 static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
1105 int sq_num, int desc_cnt)
1107 struct netdev_queue *txq;
1109 txq = netdev_get_tx_queue(nic->pnicvf->netdev,
1110 skb_get_queue_mapping(skb));
1112 netdev_tx_sent_queue(txq, skb->len);
1114 /* make sure all memory stores are done before ringing doorbell */
1117 /* Inform HW to xmit all TSO segments */
1118 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1122 /* Segment a TSO packet into 'gso_size' segments and append
1123 * them to SQ for transfer
1125 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1126 int sq_num, int qentry, struct sk_buff *skb)
1129 int seg_subdescs = 0, desc_cnt = 0;
1130 int seg_len, total_len, data_left;
1131 int hdr_qentry = qentry;
1132 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1134 tso_start(skb, &tso);
1135 total_len = skb->len - hdr_len;
1136 while (total_len > 0) {
1139 /* Save Qentry for adding HDR_SUBDESC at the end */
1140 hdr_qentry = qentry;
1142 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1143 total_len -= data_left;
1145 /* Add segment's header */
1146 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1147 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1148 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1149 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1151 qentry * TSO_HEADER_SIZE);
1152 /* HDR_SUBDESC + GATHER */
1156 /* Add segment's payload fragments */
1157 while (data_left > 0) {
1160 size = min_t(int, tso.size, data_left);
1162 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1163 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1164 virt_to_phys(tso.data));
1169 tso_build_data(skb, &tso, size);
1171 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
1172 seg_subdescs - 1, skb, seg_len);
1173 sq->skbuff[hdr_qentry] = (u64)NULL;
1174 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1176 desc_cnt += seg_subdescs;
1178 /* Save SKB in the last segment for freeing */
1179 sq->skbuff[hdr_qentry] = (u64)skb;
1181 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
1183 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1187 /* Append an skb to an SQ for packet transfer. */
1188 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1191 int subdesc_cnt, tso_sqe = 0;
1193 struct queue_set *qs;
1194 struct snd_queue *sq;
1196 sq_num = skb_get_queue_mapping(skb);
1197 if (sq_num >= MAX_SND_QUEUES_PER_QS) {
1198 /* Get secondary Qset's SQ structure */
1199 i = sq_num / MAX_SND_QUEUES_PER_QS;
1200 if (!nic->snicvf[i - 1]) {
1201 netdev_warn(nic->netdev,
1202 "Secondary Qset#%d's ptr not initialized\n",
1206 nic = (struct nicvf *)nic->snicvf[i - 1];
1207 sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
1211 sq = &qs->sq[sq_num];
1213 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1214 if (subdesc_cnt > atomic_read(&sq->free_cnt))
1217 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1219 /* Check if it's a TSO packet */
1220 if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
1221 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
1223 /* Add SQ header subdesc */
1224 nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1228 /* Add SQ gather subdescs */
1229 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1230 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1231 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
1233 /* Check for scattered buffer */
1234 if (!skb_is_nonlinear(skb))
1237 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1238 const struct skb_frag_struct *frag;
1240 frag = &skb_shinfo(skb)->frags[i];
1242 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1243 size = skb_frag_size(frag);
1244 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1246 skb_frag_address(frag)));
1250 if (nic->t88 && skb_shinfo(skb)->gso_size) {
1251 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1252 nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
1255 nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
1260 /* Use original PCI dev for debug log */
1262 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
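/* rb_lens packs four 16-bit buffer lengths per 64-bit word; this remaps the
 * index so the lengths are read in the right order on big-endian hosts
 * (little-endian builds presumably return 'i' unchanged).
 */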
1266 static inline unsigned frag_num(unsigned i)
1269 return (i & ~3) + 3 - (i & 3);
1275 /* Returns SKB for a received packet */
1276 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1279 int payload_len = 0;
1280 struct sk_buff *skb = NULL;
1283 u16 *rb_lens = NULL;
1284 u64 *rb_ptrs = NULL;
1286 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1287 /* Except on 88xx pass1, on all other chips CQE_RX2_S is added to
1288 * CQE_RX at word6, hence the buffer pointers move by one word.
1290 * Use the existing 'hw_tso' flag, which is set for all chips
1291 * except 88xx pass1, instead of an additional cache line
1292 * access (or miss) from reading the PCI dev's revision.
1295 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1297 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
1299 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
1300 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1302 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1303 payload_len = rb_lens[frag_num(frag)];
1305 /* First fragment */
1306 skb = nicvf_rb_ptr_to_skb(nic,
1307 *rb_ptrs - cqe_rx->align_pad,
1311 skb_reserve(skb, cqe_rx->align_pad);
1312 skb_put(skb, payload_len);
1315 page = virt_to_page(phys_to_virt(*rb_ptrs));
1316 offset = phys_to_virt(*rb_ptrs) - page_address(page);
1317 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1318 offset, payload_len, RCV_FRAG_LEN);
1320 /* Next buffer pointer */
1326 static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
1332 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1335 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1337 case NICVF_INTR_RBDR:
1338 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1340 case NICVF_INTR_PKT_DROP:
1341 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1343 case NICVF_INTR_TCP_TIMER:
1344 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1346 case NICVF_INTR_MBOX:
1347 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1349 case NICVF_INTR_QS_ERR:
1350 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1359 /* Enable interrupt */
1360 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1362 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1365 netdev_dbg(nic->netdev,
1366 "Failed to enable interrupt: unknown type\n");
1369 nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1370 nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1373 /* Disable interrupt */
1374 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1376 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1379 netdev_dbg(nic->netdev,
1380 "Failed to disable interrupt: unknown type\n");
1384 nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1387 /* Clear interrupt */
1388 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1390 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1393 netdev_dbg(nic->netdev,
1394 "Failed to clear interrupt: unknown type\n");
1398 nicvf_reg_write(nic, NIC_VF_INT, mask);
1401 /* Check if interrupt is enabled */
1402 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1404 u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1405 /* If the interrupt type is unknown, treat it as disabled. */
1407 netdev_dbg(nic->netdev,
1408 "Failed to check interrupt enable: unknown type\n");
1412 return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1415 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1417 struct rcv_queue *rq;
1419 #define GET_RQ_STATS(reg) \
1420 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1421 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1423 rq = &nic->qs->rq[rq_idx];
1424 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1425 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1428 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1430 struct snd_queue *sq;
1432 #define GET_SQ_STATS(reg) \
1433 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1434 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1436 sq = &nic->qs->sq[sq_idx];
1437 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1438 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1441 /* Check for errors in the receive completion queue entry */
1442 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1444 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1447 if (netif_msg_rx_err(nic))
1448 netdev_err(nic->netdev,
1449 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1451 cqe_rx->err_level, cqe_rx->err_opcode);
1453 switch (cqe_rx->err_opcode) {
1454 case CQ_RX_ERROP_RE_PARTIAL:
1455 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
1457 case CQ_RX_ERROP_RE_JABBER:
1458 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
1460 case CQ_RX_ERROP_RE_FCS:
1461 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
1463 case CQ_RX_ERROP_RE_RX_CTL:
1464 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
1466 case CQ_RX_ERROP_PREL2_ERR:
1467 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
1469 case CQ_RX_ERROP_L2_MAL:
1470 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
1472 case CQ_RX_ERROP_L2_OVERSIZE:
1473 this_cpu_inc(nic->drv_stats->rx_oversize);
1475 case CQ_RX_ERROP_L2_UNDERSIZE:
1476 this_cpu_inc(nic->drv_stats->rx_undersize);
1478 case CQ_RX_ERROP_L2_LENMISM:
1479 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
1481 case CQ_RX_ERROP_L2_PCLP:
1482 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
1484 case CQ_RX_ERROP_IP_NOT:
1485 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
1487 case CQ_RX_ERROP_IP_CSUM_ERR:
1488 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
1490 case CQ_RX_ERROP_IP_MAL:
1491 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
1493 case CQ_RX_ERROP_IP_MALD:
1494 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
1496 case CQ_RX_ERROP_IP_HOP:
1497 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
1499 case CQ_RX_ERROP_L3_PCLP:
1500 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
1502 case CQ_RX_ERROP_L4_MAL:
1503 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
1505 case CQ_RX_ERROP_L4_CHK:
1506 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
1508 case CQ_RX_ERROP_UDP_LEN:
1509 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
1511 case CQ_RX_ERROP_L4_PORT:
1512 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
1514 case CQ_RX_ERROP_TCP_FLAG:
1515 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
1517 case CQ_RX_ERROP_TCP_OFFSET:
1518 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
1520 case CQ_RX_ERROP_L4_PCLP:
1521 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
1523 case CQ_RX_ERROP_RBDR_TRUNC:
1524 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
1531 /* Check for errors in the send completion queue entry */
1532 int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
1534 switch (cqe_tx->send_status) {
1535 case CQ_TX_ERROP_GOOD:
1537 case CQ_TX_ERROP_DESC_FAULT:
1538 this_cpu_inc(nic->drv_stats->tx_desc_fault);
1540 case CQ_TX_ERROP_HDR_CONS_ERR:
1541 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
1543 case CQ_TX_ERROP_SUBDC_ERR:
1544 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
1546 case CQ_TX_ERROP_MAX_SIZE_VIOL:
1547 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
1549 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1550 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
1552 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1553 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
1555 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1556 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
1558 case CQ_TX_ERROP_LOCK_VIOL:
1559 this_cpu_inc(nic->drv_stats->tx_lock_viol);
1561 case CQ_TX_ERROP_DATA_FAULT:
1562 this_cpu_inc(nic->drv_stats->tx_data_fault);
1564 case CQ_TX_ERROP_TSTMP_CONFLICT:
1565 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
1567 case CQ_TX_ERROP_TSTMP_TIMEOUT:
1568 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
1570 case CQ_TX_ERROP_MEM_FAULT:
1571 this_cpu_inc(nic->drv_stats->tx_mem_fault);
1573 case CQ_TX_ERROP_CK_OVERLAP:
1574 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
1576 case CQ_TX_ERROP_CK_OFLOW:
1577 this_cpu_inc(nic->drv_stats->tx_csum_overflow);