1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/pci.h>
19 #include <linux/if_vlan.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
25 #include "octeon_nic.h"
26 #include "octeon_main.h"
27 #include "octeon_network.h"
29 /* OOM task polling interval */
30 #define LIO_OOM_POLL_INTERVAL_MS 250
32 #define OCTNIC_MAX_SG MAX_SKB_FRAGS
35 * \brief Callback for getting interface configuration
36 * @param status status of request
37 * @param buf pointer to resp structure
/* Completion callback for the NIC "interface config" soft command.
 * The response and wait-context are carried inside the soft command (@buf);
 * @status is unused here because resp->status is reported instead.
 * NOTE(review): some lines (braces, status check) appear elided in this
 * view; comments describe only what is visible.
 */
39 void lio_if_cfg_callback(struct octeon_device *oct,
40 u32 status __attribute__((unused)), void *buf)
42 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
43 struct liquidio_if_cfg_context *ctx;
44 struct liquidio_if_cfg_resp *resp;
46 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
47 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
/* Re-resolve the octeon device from the context's saved device id. */
49 oct = lio_get_device(ctx->octeon_id);
51 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
52 CVM_CAST64(resp->status));
/* Tell the sleeping requester that a response has arrived. */
53 WRITE_ONCE(ctx->cond, 1);
/* Cache the firmware version string from the interface-config response. */
55 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
56 resp->cfg_info.liquidio_firmware_version);
58 /* This barrier is required to be sure that the response has been
59 * written fully before waking up the handler
63 wake_up_interruptible(&ctx->wc);
67 * \brief Delete gather lists
68 * @param lio per-network private data
/* Tear down the per-IQ scatter/gather lists.
 * Frees every octnic_gather node queued on each glist, the per-IQ DMA
 * coherent region holding the SG entries, and the bookkeeping arrays.
 * All freed pointers are NULLed so a second call is harmless.
 */
70 void lio_delete_glists(struct lio *lio)
72 struct octnic_gather *g;
75 kfree(lio->glist_lock);
76 lio->glist_lock = NULL;
81 for (i = 0; i < lio->oct_dev->num_iqs; i++) {
/* Drain the gather-node list for this IQ one head at a time. */
83 g = (struct octnic_gather *)
84 lio_list_delete_head(&lio->glist[i]);
/* Free the DMA coherent SG area only if both the virt and dma base
 * arrays (and this IQ's entries) were actually allocated.
 */
88 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
89 lio->glists_dma_base && lio->glists_dma_base[i]) {
90 lio_dma_free(lio->oct_dev,
91 lio->glist_entry_size * lio->tx_qsize,
92 lio->glists_virt_base[i],
93 lio->glists_dma_base[i]);
97 kfree(lio->glists_virt_base);
98 lio->glists_virt_base = NULL;
100 kfree(lio->glists_dma_base);
101 lio->glists_dma_base = NULL;
108 * \brief Setup gather lists
109 * @param lio per-network private data
/* Allocate the per-IQ scatter/gather machinery:
 *  - a spinlock and a list head per IQ,
 *  - one DMA coherent region per IQ holding tx_qsize SG entry blocks,
 *  - one octnic_gather node per TX descriptor, pointing into that region.
 * Returns 0 on success; on any failure everything allocated so far is
 * released via lio_delete_glists().
 */
111 int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
113 struct octnic_gather *g;
117 kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
118 if (!lio->glist_lock)
122 kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
/* glist array allocation failed: undo the lock array. */
124 kfree(lio->glist_lock);
125 lio->glist_lock = NULL;
/* Size of one SG entry block, rounded for alignment. */
129 lio->glist_entry_size =
130 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
132 /* allocate memory to store virtual and dma base address of
133 * per glist consistent memory
135 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
137 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
140 if (!lio->glists_virt_base || !lio->glists_dma_base) {
141 lio_delete_glists(lio);
145 for (i = 0; i < num_iqs; i++) {
146 int numa_node = dev_to_node(&oct->pci_dev->dev);
148 spin_lock_init(&lio->glist_lock[i]);
150 INIT_LIST_HEAD(&lio->glist[i]);
/* One coherent region per IQ; entries for all tx descriptors. */
152 lio->glists_virt_base[i] =
154 lio->glist_entry_size * lio->tx_qsize,
155 &lio->glists_dma_base[i]);
157 if (!lio->glists_virt_base[i]) {
158 lio_delete_glists(lio);
/* Populate the free list with one gather node per TX descriptor,
 * preferring the device's NUMA node and falling back to any node.
 */
162 for (j = 0; j < lio->tx_qsize; j++) {
163 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
166 g = kzalloc(sizeof(*g), GFP_KERNEL);
/* Each node's SG area is a slice of this IQ's coherent region. */
170 g->sg = lio->glists_virt_base[i] +
171 (j * lio->glist_entry_size);
173 g->sg_dma_ptr = lio->glists_dma_base[i] +
174 (j * lio->glist_entry_size);
176 list_add_tail(&g->list, &lio->glist[i]);
/* Partial population means an allocation failed above: clean up. */
179 if (j != lio->tx_qsize) {
180 lio_delete_glists(lio);
/* Send a feature-change control command (@cmd with @param1) to the NIC
 * firmware via control queue 0. Completion is reported asynchronously
 * through liquidio_link_ctrl_cmd_completion(). Returns the send status.
 */
188 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
190 struct lio *lio = GET_LIO(netdev);
191 struct octeon_device *oct = lio->oct_dev;
192 struct octnic_ctrl_pkt nctrl;
195 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
198 nctrl.ncmd.s.cmd = cmd;
199 nctrl.ncmd.s.param1 = param1;
200 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
201 nctrl.wait_time = 100;
202 nctrl.netpndev = (u64)netdev;
203 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
205 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
207 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
/* Report completed TX work to the kernel's Byte Queue Limits layer.
 * @txq:         the struct netdev_queue, passed as an opaque pointer
 * @pkts_compl:  number of packets completed
 * @bytes_compl: number of bytes completed
 */
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *nq = txq;

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
/* Accumulate packet/byte completion counters for a finished TX request.
 * @buf is interpreted per @reqtype: a buf_free_info for NORESP requests,
 * or a soft command (whose callback_arg is the skb) for RESP requests.
 * NOTE(review): the switch head, skb extraction for the NORESP cases and
 * the *pkts_compl increment are elided in this view — confirm against
 * the full source before relying on these comments.
 */
221 void octeon_update_tx_completion_counters(void *buf, int reqtype,
222 unsigned int *pkts_compl,
223 unsigned int *bytes_compl)
225 struct octnet_buf_free_info *finfo;
226 struct sk_buff *skb = NULL;
227 struct octeon_soft_command *sc;
230 case REQTYPE_NORESP_NET:
231 case REQTYPE_NORESP_NET_SG:
236 case REQTYPE_RESP_NET_SG:
237 case REQTYPE_RESP_NET:
/* For RESP requests the skb rides in the soft command. */
239 skb = sc->callback_arg;
247 *bytes_compl += skb->len;
/* Account a just-queued TX packet to BQL and report queue state.
 * Extracts the skb from @buf per @reqtype (see the companion completion
 * routine above), charges skb->len to the matching netdev TX queue, and
 * returns whether that queue is now stopped.
 */
250 int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
252 struct octnet_buf_free_info *finfo;
254 struct octeon_soft_command *sc;
255 struct netdev_queue *txq;
258 case REQTYPE_NORESP_NET:
259 case REQTYPE_NORESP_NET_SG:
264 case REQTYPE_RESP_NET_SG:
265 case REQTYPE_RESP_NET:
267 skb = sc->callback_arg;
/* Charge the bytes to the queue this skb was mapped onto. */
274 txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
275 netdev_tx_sent_queue(txq, skb->len);
277 return netif_xmit_stopped(txq);
/* Completion handler for NIC control commands sent via
 * octnet_send_nic_ctrl_pkt(). Optionally signals a synchronous waiter,
 * then logs a human-readable message per command type.
 */
280 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
282 struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
283 struct net_device *netdev = (struct net_device *)nctrl->netpndev;
284 struct lio *lio = GET_LIO(netdev);
285 struct octeon_device *oct = lio->oct_dev;
288 if (nctrl->completion && nctrl->response_code) {
289 /* Signal whoever is interested that the response code from the
290 * firmware has arrived.
292 WRITE_ONCE(*nctrl->response_code, nctrl->status);
293 complete(nctrl->completion);
/* Below: purely informational logging keyed on the command id. */
299 switch (nctrl->ncmd.s.cmd) {
300 case OCTNET_CMD_CHANGE_DEVFLAGS:
301 case OCTNET_CMD_SET_MULTI_LIST:
302 case OCTNET_CMD_SET_UC_LIST:
305 case OCTNET_CMD_CHANGE_MACADDR:
/* MAC bytes live at offset 2 of the first 8-byte udd word. */
306 mac = ((u8 *)&nctrl->udd[0]) + 2;
307 if (nctrl->ncmd.s.param1) {
308 /* vfidx is 0 based, but vf_num (param1) is 1 based */
309 int vfidx = nctrl->ncmd.s.param1 - 1;
310 bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
312 if (mac_is_admin_assigned)
313 netif_info(lio, probe, lio->netdev,
314 "MAC Address %pM is configured for VF %d\n",
317 netif_info(lio, probe, lio->netdev,
318 " MACAddr changed to %pM\n",
323 case OCTNET_CMD_GPIO_ACCESS:
324 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
328 case OCTNET_CMD_ID_ACTIVE:
329 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
333 case OCTNET_CMD_LRO_ENABLE:
334 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
337 case OCTNET_CMD_LRO_DISABLE:
338 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
342 case OCTNET_CMD_VERBOSE_ENABLE:
343 dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
347 case OCTNET_CMD_VERBOSE_DISABLE:
348 dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
352 case OCTNET_CMD_VLAN_FILTER_CTL:
353 if (nctrl->ncmd.s.param1)
354 dev_info(&oct->pci_dev->dev,
355 "%s VLAN filter enabled\n", netdev->name);
357 dev_info(&oct->pci_dev->dev,
358 "%s VLAN filter disabled\n", netdev->name);
361 case OCTNET_CMD_ADD_VLAN_FILTER:
362 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
363 netdev->name, nctrl->ncmd.s.param1);
366 case OCTNET_CMD_DEL_VLAN_FILTER:
367 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
368 netdev->name, nctrl->ncmd.s.param1);
371 case OCTNET_CMD_SET_SETTINGS:
372 dev_info(&oct->pci_dev->dev, "%s settings changed\n",
377 /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
378 * Command passed by NIC driver
380 case OCTNET_CMD_TNL_RX_CSUM_CTL:
381 if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
382 netif_info(lio, probe, lio->netdev,
383 "RX Checksum Offload Enabled\n");
384 } else if (nctrl->ncmd.s.param1 ==
385 OCTNET_CMD_RXCSUM_DISABLE) {
386 netif_info(lio, probe, lio->netdev,
387 "RX Checksum Offload Disabled\n");
391 /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
392 * Command passed by NIC driver
394 case OCTNET_CMD_TNL_TX_CSUM_CTL:
395 if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
396 netif_info(lio, probe, lio->netdev,
397 "TX Checksum Offload Enabled\n");
398 } else if (nctrl->ncmd.s.param1 ==
399 OCTNET_CMD_TXCSUM_DISABLE) {
400 netif_info(lio, probe, lio->netdev,
401 "TX Checksum Offload Disabled\n");
405 /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
406 * Command passed by NIC driver
408 case OCTNET_CMD_VXLAN_PORT_CONFIG:
409 if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
410 netif_info(lio, probe, lio->netdev,
411 "VxLAN Destination UDP PORT:%d ADDED\n",
412 nctrl->ncmd.s.param1);
413 } else if (nctrl->ncmd.s.more ==
414 OCTNET_CMD_VXLAN_PORT_DEL) {
415 netif_info(lio, probe, lio->netdev,
416 "VxLAN Destination UDP PORT:%d DELETED\n",
417 nctrl->ncmd.s.param1);
421 case OCTNET_CMD_SET_FLOW_CTL:
422 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
425 case OCTNET_CMD_QUEUE_COUNT_CTL:
426 netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
427 nctrl->ncmd.s.param1);
/* Unknown command id: log it for diagnosis. */
431 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
/* VF-side handler invoked when the PF reassigns this VF's MAC address.
 * Marks the address as admin-assigned, updates the netdev and cached
 * link-info copies if the address actually changed, and notifies
 * listeners via NETDEV_CHANGEADDR.
 */
436 void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
438 bool macaddr_changed = false;
439 struct net_device *netdev;
444 netdev = oct->props[0].netdev;
445 lio = GET_LIO(netdev);
447 lio->linfo.macaddr_is_admin_asgnd = true;
449 if (!ether_addr_equal(netdev->dev_addr, mac)) {
450 macaddr_changed = true;
451 ether_addr_copy(netdev->dev_addr, mac);
/* hw_addr is a u64; the 6 MAC bytes start at offset 2. */
452 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
453 call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
459 dev_info(&oct->pci_dev->dev,
460 "PF changed VF's MAC address to %pM\n", mac);
462 /* no need to notify the firmware of the macaddr change because
463 * the PF did that already
/* Delayed-work handler: while the interface is RUNNING, check every RX
 * droq for buffer exhaustion (refilling via octeon_droq_check_oom), then
 * re-arm itself to run again after LIO_OOM_POLL_INTERVAL_MS.
 */
467 static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
469 struct cavium_wk *wk = (struct cavium_wk *)work;
470 struct lio *lio = (struct lio *)wk->ctxptr;
471 struct octeon_device *oct = lio->oct_dev;
472 struct octeon_droq *droq;
475 if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
476 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
477 q_no = lio->linfo.rxpciq[q].s.q_no;
478 droq = oct->droq[q_no];
481 octeon_droq_check_oom(droq);
/* Self-rearming poll: queue the next check. */
484 queue_delayed_work(lio->rxq_status_wq.wq,
485 &lio->rxq_status_wq.wk.work,
486 msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
/* Create the dedicated workqueue for RX out-of-memory polling and kick
 * off the first octnet_poll_check_rxq_oom_status() run.
 * Returns 0 on success, an error code if the workqueue cannot be made.
 */
489 int setup_rx_oom_poll_fn(struct net_device *netdev)
491 struct lio *lio = GET_LIO(netdev);
492 struct octeon_device *oct = lio->oct_dev;
494 lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
496 if (!lio->rxq_status_wq.wq) {
497 dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
500 INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
501 octnet_poll_check_rxq_oom_status);
/* The work handler recovers the lio through this context pointer. */
502 lio->rxq_status_wq.wk.ctxptr = lio;
503 queue_delayed_work(lio->rxq_status_wq.wq,
504 &lio->rxq_status_wq.wk.work,
505 msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
/* Stop and destroy the RX-OOM polling workqueue created by
 * setup_rx_oom_poll_fn(). Safe to call when it was never created.
 */
509 void cleanup_rx_oom_poll_fn(struct net_device *netdev)
511 struct lio *lio = GET_LIO(netdev);
513 if (lio->rxq_status_wq.wq) {
/* Cancel the self-rearming work before tearing the queue down. */
514 cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
515 flush_workqueue(lio->rxq_status_wq.wq);
516 destroy_workqueue(lio->rxq_status_wq.wq);
520 /* Runs in interrupt context. */
/* Wake the TX subqueue backing instruction queue @iq_num if it was
 * stopped, the link is up, and the IQ has room again.
 */
521 static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
523 struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
524 struct net_device *netdev;
527 netdev = oct->props[iq->ifidx].netdev;
529 /* This is needed because the first IQ does not have
530 * a netdev associated with it.
535 lio = GET_LIO(netdev);
536 if (__netif_subqueue_stopped(netdev, iq->q_index) &&
537 lio->linfo.link.s.link_up &&
538 (!octnet_iq_is_full(oct, iq_num))) {
539 netif_wake_subqueue(netdev, iq->q_index);
540 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
546 * \brief Setup output queue
547 * @param oct octeon device
548 * @param q_no which queue
549 * @param num_descs how many descriptors
550 * @param desc_size size of each descriptor
551 * @param app_ctx application context
/* Create and enable one output (RX) queue, then prime its credit
 * register so the device can start filling it. Returns the status of
 * octeon_create_droq() (0 on success).
 */
553 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
554 int desc_size, void *app_ctx)
558 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
559 /* droq creation and local register settings. */
560 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
565 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
569 /* Enable the droq queues */
570 octeon_set_droq_pkt_op(oct, q_no, 1);
572 /* Send Credit for Octeon Output queues. Credits are always
573 * sent after the output queue is enabled.
575 writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
580 /** Routine to push packets arriving on Octeon interface upto network layer.
581 * @param oct_id - octeon device id.
582 * @param skbuff - skbuff struct to be passed to network layer.
583 * @param len - size of total data received.
584 * @param rh - Control header associated with the packet
585 * @param param - additional control data with the packet
586 * @param arg - farg registered in droq_ops
/* RX fast path: convert a droq buffer into an skb and hand it to the
 * network stack via GRO. Handles small-packet linearization vs paged
 * frags, optional HW timestamp, RSS hash, checksum status, VXLAN
 * encapsulation flag and inbound VLAN tag, and updates droq stats.
 * NOTE(review): the return-type line and several guard lines are elided
 * in this view.
 */
589 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
596 struct net_device *netdev = (struct net_device *)arg;
597 struct octeon_droq *droq =
598 container_of(param, struct octeon_droq, napi);
599 struct sk_buff *skb = (struct sk_buff *)skbuff;
600 struct skb_shared_hwtstamps *shhwtstamps;
601 struct napi_struct *napi = param;
607 struct lio *lio = GET_LIO(netdev);
608 struct octeon_device *oct = lio->oct_dev;
610 /* Do not proceed if the interface is not in RUNNING state. */
611 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
612 recv_buffer_free(skb);
613 droq->stats.rx_dropped++;
619 skb_record_rx_queue(skb, droq->q_no);
/* Large packets: copy only the header into the linear area and attach
 * the rest of the page as a frag; small packets are fully linearized.
 */
620 if (likely(len > MIN_SKB_SIZE)) {
621 struct octeon_skb_page_info *pg_info;
624 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
626 /* For Paged allocation use the frags */
627 va = page_address(pg_info->page) +
628 pg_info->page_offset;
629 memcpy(skb->data, va, MIN_SKB_SIZE);
630 skb_put(skb, MIN_SKB_SIZE);
631 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
633 pg_info->page_offset +
639 struct octeon_skb_page_info *pg_info =
640 ((struct octeon_skb_page_info *)(skb->cb));
641 skb_copy_to_linear_data(skb, page_address(pg_info->page)
642 + pg_info->page_offset, len);
/* Fully copied, so the backing page can be released. */
644 put_page(pg_info->page);
/* Offset of the last data-header word; optional fields (timestamp,
 * hash) are consumed from the end backwards.
 */
647 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
649 if (oct->ptp_enable) {
650 if (rh->r_dh.has_hwtstamp) {
651 /* timestamp is included from the hardware at
652 * the beginning of the packet.
656 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
657 /* Nanoseconds are in the first 64-bits
660 memcpy(&ns, (skb->data + r_dh_off),
662 r_dh_off -= BYTES_PER_DHLEN_UNIT;
663 shhwtstamps = skb_hwtstamps(skb);
664 shhwtstamps->hwtstamp =
671 if (rh->r_dh.has_hash) {
672 __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
673 u32 hash = be32_to_cpu(*hash_be);
675 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
676 r_dh_off -= BYTES_PER_DHLEN_UNIT;
/* Strip the firmware data headers before handing to the stack. */
679 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
680 skb->protocol = eth_type_trans(skb, skb->dev);
682 if ((netdev->features & NETIF_F_RXCSUM) &&
683 (((rh->r_dh.encap_on) &&
684 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
685 (!(rh->r_dh.encap_on) &&
686 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
687 /* checksum has already been verified */
688 skb->ip_summed = CHECKSUM_UNNECESSARY;
690 skb->ip_summed = CHECKSUM_NONE;
692 /* Setting Encapsulation field on basis of status received
695 if (rh->r_dh.encap_on) {
696 skb->encapsulation = 1;
698 droq->stats.rx_vxlan++;
701 /* inbound VLAN tag */
702 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
704 u16 priority = rh->r_dh.priority;
705 u16 vid = rh->r_dh.vlan;
707 vtag = (priority << VLAN_PRIO_SHIFT) | vid;
708 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
711 napi_gro_receive(napi, skb);
/* Byte count excludes the firmware data headers stripped above. */
713 droq->stats.rx_bytes_received += len -
714 rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
715 droq->stats.rx_pkts_received++;
717 recv_buffer_free(skb);
722 * \brief wrapper for calling napi_schedule
723 * @param param parameters to pass to napi_schedule
725 * Used when scheduling on different CPUs
/* Trampoline with a void* signature so NAPI scheduling can be invoked
 * via smp_call_function_single_async() on another CPU.
 * NOTE(review): the actual napi_schedule call is elided in this view.
 */
727 static void napi_schedule_wrapper(void *param)
729 struct napi_struct *napi = param;
735 * \brief callback when receive interrupt occurs and we are in NAPI mode
736 * @param arg pointer to octeon output queue
/* Receive-interrupt callback (NAPI mode). On CN23XX, or when the droq's
 * designated CPU is the current one, schedule NAPI directly from IRQ
 * context; otherwise bounce the schedule to the droq's CPU via an async
 * smp call so polling runs on its assigned core.
 */
738 static void liquidio_napi_drv_callback(void *arg)
740 struct octeon_device *oct;
741 struct octeon_droq *droq = arg;
742 int this_cpu = smp_processor_id();
746 if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
747 droq->cpu_id == this_cpu) {
748 napi_schedule_irqoff(&droq->napi);
750 call_single_data_t *csd = &droq->csd;
752 csd->func = napi_schedule_wrapper;
753 csd->info = &droq->napi;
/* Run napi_schedule_wrapper on the droq's designated CPU. */
756 smp_call_function_single_async(droq->cpu_id, csd);
761 * \brief Entry point for NAPI polling
762 * @param napi NAPI structure
763 * @param budget maximum number of items to process
/* NAPI poll: process up to @budget RX packets on this droq, flush the
 * paired instruction queue, and wake its TX subqueue if it has drained.
 * Completes NAPI and re-enables the queue interrupt when both RX work
 * is under budget and TX is done (or when packet counters grow large
 * enough to risk wraparound).
 */
765 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
767 struct octeon_instr_queue *iq;
768 struct octeon_device *oct;
769 struct octeon_droq *droq;
770 int tx_done = 0, iq_no;
773 droq = container_of(napi, struct octeon_droq, napi);
777 /* Handle Droq descriptors */
778 work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
780 /* Flush the instruction queue */
781 iq = oct->instr_queue[iq_no];
783 /* TODO: move this check to inside octeon_flush_iq,
784 * once check_db_timeout is removed
786 if (atomic_read(&iq->instr_pending))
787 /* Process iq buffers with in the budget limits */
788 tx_done = octeon_flush_iq(oct, iq, budget);
791 /* Update iq read-index rather than waiting for next interrupt.
792 * Return back if tx_done is false.
794 /* sub-queue status update */
795 lio_update_txq_status(oct, iq_no);
797 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
801 #define MAX_REG_CNT 2000000U
802 /* force enable interrupt if reg cnts are high to avoid wraparound */
803 if ((work_done < budget && tx_done) ||
804 (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
805 (droq->pkt_count >= MAX_REG_CNT)) {
807 napi_complete_done(napi, work_done);
809 octeon_enable_irq(droq->oct_dev, droq->q_no);
/* Keep polling (return budget) while TX work remains. */
813 return (!tx_done) ? (budget) : (work_done);
817 * \brief Setup input and output queues
818 * @param octeon_dev octeon device
819 * @param ifidx Interface index
821 * Note: Queues are with respect to the octeon device. Thus
822 * an input queue is for egress packets, and output queues
823 * are for ingress packets.
/* Create all RX (droq) and TX (iq) queues for interface @ifidx, register
 * the RX push callback and NAPI instances, spread droqs across CPUs, and
 * bind TX queues to MSI-X affinity masks via XPS where available.
 * Returns 0 on success, non-zero if any queue creation fails.
 */
825 int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
826 u32 num_iqs, u32 num_oqs)
828 struct octeon_droq_ops droq_ops;
829 struct net_device *netdev;
830 struct octeon_droq *droq;
831 struct napi_struct *napi;
839 netdev = octeon_dev->props[ifidx].netdev;
841 lio = GET_LIO(netdev);
843 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
/* Every RX packet is delivered through liquidio_push_packet(). */
845 droq_ops.fptr = liquidio_push_packet;
846 droq_ops.farg = netdev;
848 droq_ops.poll_mode = 1;
849 droq_ops.napi_fn = liquidio_napi_drv_callback;
/* Round-robin droq CPU assignment over the present CPUs. */
851 cpu_id_modulus = num_present_cpus();
854 for (q = 0; q < num_oqs; q++) {
855 q_no = lio->linfo.rxpciq[q].s.q_no;
856 dev_dbg(&octeon_dev->pci_dev->dev,
857 "%s index:%d linfo.rxpciq.s.q_no:%d\n",
859 retval = octeon_setup_droq(
861 CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
863 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
867 dev_err(&octeon_dev->pci_dev->dev,
868 "%s : Runtime DROQ(RxQ) creation failed.\n",
873 droq = octeon_dev->droq[q_no];
875 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
876 (u64)netdev, (u64)octeon_dev);
877 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
879 /* designate a CPU for this droq */
880 droq->cpu_id = cpu_id;
882 if (cpu_id >= cpu_id_modulus)
885 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
888 if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
889 /* 23XX PF/VF can send/recv control messages (via the first
890 * PF/VF-owned droq) from the firmware even if the ethX
891 * interface is down, so that's why poll_mode must be off
892 * for the first droq.
894 octeon_dev->droq[0]->ops.poll_mode = 0;
898 for (q = 0; q < num_iqs; q++) {
899 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
900 octeon_get_conf(octeon_dev), lio->ifidx);
901 retval = octeon_setup_iq(octeon_dev, ifidx, q,
902 lio->linfo.txpciq[q], num_tx_descs,
903 netdev_get_tx_queue(netdev, q));
905 dev_err(&octeon_dev->pci_dev->dev,
906 " %s : Runtime IQ(TxQ) creation failed.\n",
/* On MSI-X-capable non-VF setups, steer transmits from the CPUs in
 * the vector's affinity mask to the matching TX queue (XPS).
 */
912 if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
913 octeon_dev->ioq_vector) {
914 struct octeon_ioq_vector *ioq_vector;
916 ioq_vector = &octeon_dev->ioq_vector[q];
917 netif_set_xps_queue(netdev,
918 &ioq_vector->affinity_mask,
919 ioq_vector->iq_index);
/* Dispatch work signalled by an MSI-X vector: in poll (NAPI) mode invoke
 * the droq's napi_fn directly; otherwise fall back to the droq tasklet
 * for RX work (@ret & MSIX_PO_INT). TX-side doorbells (MSIX_PI_INT) are
 * left to the periodic IQ-flush check.
 */
927 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
929 struct octeon_device *oct = droq->oct_dev;
930 struct octeon_device_priv *oct_priv =
931 (struct octeon_device_priv *)oct->priv;
933 if (droq->ops.poll_mode) {
934 droq->ops.napi_fn(droq);
936 if (ret & MSIX_PO_INT) {
/* VFs are expected to always be in poll mode for RX. */
937 if (OCTEON_CN23XX_VF(oct))
938 dev_err(&oct->pci_dev->dev,
939 "should not come here should not get rx when poll mode = 0 for vf\n");
940 tasklet_schedule(&oct_priv->droq_tasklet);
943 /* this will be flushed periodically by check iq db */
944 if (ret & MSIX_PI_INT)
/* MSI-X interrupt handler for one IO queue vector: read/clear the vector
 * status through the chip-specific handler, then schedule droq/iq
 * processing for whatever (RX/TX) work it reported.
 */
952 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
954 struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
955 struct octeon_device *oct = ioq_vector->oct_dev;
956 struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
959 ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
961 if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
962 liquidio_schedule_msix_droq_pkt_handler(droq, ret);
968 * \brief Droq packet processor sceduler
969 * @param oct octeon device
/* For legacy/MSI interrupts: walk the droq interrupt bitmap and, for
 * each droq with pending packets, either run its NAPI callback (poll
 * mode) or schedule the shared droq tasklet.
 */
971 static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
973 struct octeon_device_priv *oct_priv =
974 (struct octeon_device_priv *)oct->priv;
975 struct octeon_droq *droq;
978 if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
979 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
981 if (!(oct->droq_intr & BIT_ULL(oq_no)))
984 droq = oct->droq[oq_no];
986 if (droq->ops.poll_mode) {
987 droq->ops.napi_fn(droq);
/* Track which queues were handled via NAPI. */
988 oct_priv->napi_mask |= BIT_ULL(oq_no);
990 tasklet_schedule(&oct_priv->droq_tasklet);
997 * \brief Interrupt handler for octeon
999 * @param dev octeon device
/* Legacy/MSI interrupt handler: mask all device interrupts for the ISR's
 * duration, process the interrupt registers, dispatch any packet work,
 * then unmask (unless the device is mid-reset).
 */
1002 irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
1005 struct octeon_device *oct = (struct octeon_device *)dev;
1008 /* Disable our interrupts for the duration of ISR */
1009 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1011 ret = oct->fn_list.process_interrupt_regs(oct);
1013 if (ret == IRQ_HANDLED)
1014 liquidio_schedule_droq_pkt_handlers(oct);
1016 /* Re-enable our interrupts */
1017 if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
1018 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
1024 * \brief Setup interrupt for octeon device
1025 * @param oct octeon device
1027 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
/* Set up device interrupts for @num_ioqs IO queues.
 * MSI-X path: allocate per-vector name storage and msix_entry arrays,
 * enable MSI-X (PF gets one extra non-IOQ "aux" vector), request each
 * IRQ, and set CPU affinity hints. Non-MSI-X path (bottom half): try
 * MSI, then fall back to legacy INTx, with a single shared handler.
 * All error paths unwind everything allocated/requested so far.
 */
1029 int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
1031 struct msix_entry *msix_entries;
1032 char *queue_irq_names = NULL;
1033 int i, num_interrupts = 0;
1034 int num_alloc_ioq_vectors;
1035 char *aux_irq_name = NULL;
1036 int num_ioq_vectors;
1040 oct->num_msix_irqs = num_ioqs;
1041 if (OCTEON_CN23XX_PF(oct)) {
1042 num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
1044 /* one non ioq interrupt for handling
1045 * sli_mac_pf_int_sum
1047 oct->num_msix_irqs += 1;
1048 } else if (OCTEON_CN23XX_VF(oct)) {
1049 num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
1052 /* allocate storage for the names assigned to each irq */
1053 oct->irq_name_storage =
1054 kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
1055 if (!oct->irq_name_storage) {
1056 dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
1060 queue_irq_names = oct->irq_name_storage;
/* PF only: the aux (non-IOQ) name slot sits after the IOQ slots. */
1062 if (OCTEON_CN23XX_PF(oct))
1063 aux_irq_name = &queue_irq_names
1064 [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
1066 oct->msix_entries = kcalloc(oct->num_msix_irqs,
1067 sizeof(struct msix_entry),
1069 if (!oct->msix_entries) {
1070 dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
1071 kfree(oct->irq_name_storage);
1072 oct->irq_name_storage = NULL;
1076 msix_entries = (struct msix_entry *)oct->msix_entries;
1078 /*Assumption is that pf msix vectors start from pf srn to pf to
1079 * trs and not from 0. if not change this code
1081 if (OCTEON_CN23XX_PF(oct)) {
1082 for (i = 0; i < oct->num_msix_irqs - 1; i++)
1083 msix_entries[i].entry =
1084 oct->sriov_info.pf_srn + i;
/* Last PF entry is the aux/mac interrupt (trs). */
1086 msix_entries[oct->num_msix_irqs - 1].entry =
1087 oct->sriov_info.trs;
1088 } else if (OCTEON_CN23XX_VF(oct)) {
1089 for (i = 0; i < oct->num_msix_irqs; i++)
1090 msix_entries[i].entry = i;
1092 num_alloc_ioq_vectors = pci_enable_msix_range(
1093 oct->pci_dev, msix_entries,
1095 oct->num_msix_irqs);
1096 if (num_alloc_ioq_vectors < 0) {
1097 dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
1098 kfree(oct->msix_entries);
1099 oct->msix_entries = NULL;
1100 kfree(oct->irq_name_storage);
1101 oct->irq_name_storage = NULL;
1102 return num_alloc_ioq_vectors;
1105 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1107 num_ioq_vectors = oct->num_msix_irqs;
1108 /** For PF, there is one non-ioq interrupt handler */
1109 if (OCTEON_CN23XX_PF(oct)) {
1110 num_ioq_vectors -= 1;
1112 snprintf(aux_irq_name, INTRNAMSIZ,
1113 "LiquidIO%u-pf%u-aux", oct->octeon_id,
/* The aux vector uses the legacy handler (mac/error events). */
1115 irqret = request_irq(
1116 msix_entries[num_ioq_vectors].vector,
1117 liquidio_legacy_intr_handler, 0,
1120 dev_err(&oct->pci_dev->dev,
1121 "Request_irq failed for MSIX interrupt Error: %d\n",
1123 pci_disable_msix(oct->pci_dev);
1124 kfree(oct->msix_entries);
1125 kfree(oct->irq_name_storage);
1126 oct->irq_name_storage = NULL;
1127 oct->msix_entries = NULL;
1131 for (i = 0 ; i < num_ioq_vectors ; i++) {
1132 if (OCTEON_CN23XX_PF(oct))
1133 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1134 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
1135 oct->octeon_id, oct->pf_num, i);
1137 if (OCTEON_CN23XX_VF(oct))
1138 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1139 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
1140 oct->octeon_id, oct->vf_num, i);
1142 irqret = request_irq(msix_entries[i].vector,
1143 liquidio_msix_intr_handler, 0,
1144 &queue_irq_names[IRQ_NAME_OFF(i)],
1145 &oct->ioq_vector[i]);
/* Failure mid-loop: free the aux vector and every IOQ vector
 * requested so far (clearing each affinity hint first), then
 * disable MSI-X and release the arrays.
 */
1148 dev_err(&oct->pci_dev->dev,
1149 "Request_irq failed for MSIX interrupt Error: %d\n",
1151 /** Freeing the non-ioq irq vector here . */
1152 free_irq(msix_entries[num_ioq_vectors].vector,
1157 /** clearing affinity mask. */
1158 irq_set_affinity_hint(
1159 msix_entries[i].vector,
1161 free_irq(msix_entries[i].vector,
1162 &oct->ioq_vector[i]);
1164 pci_disable_msix(oct->pci_dev);
1165 kfree(oct->msix_entries);
1166 kfree(oct->irq_name_storage);
1167 oct->irq_name_storage = NULL;
1168 oct->msix_entries = NULL;
1171 oct->ioq_vector[i].vector = msix_entries[i].vector;
1172 /* assign the cpu mask for this msix interrupt vector */
1173 irq_set_affinity_hint(msix_entries[i].vector,
1174 &oct->ioq_vector[i].affinity_mask
1177 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
/* Non-MSI-X fallback: try MSI, else stay on legacy INTx. */
1180 err = pci_enable_msi(oct->pci_dev);
1182 dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1185 oct->flags |= LIO_FLAG_MSI_ENABLED;
1187 /* allocate storage for the names assigned to the irq */
1188 oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
1189 if (!oct->irq_name_storage)
1192 queue_irq_names = oct->irq_name_storage;
1194 if (OCTEON_CN23XX_PF(oct))
1195 snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1196 "LiquidIO%u-pf%u-rxtx-%u",
1197 oct->octeon_id, oct->pf_num, 0);
1199 if (OCTEON_CN23XX_VF(oct))
1200 snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1201 "LiquidIO%u-vf%u-rxtx-%u",
1202 oct->octeon_id, oct->vf_num, 0);
1204 irqret = request_irq(oct->pci_dev->irq,
1205 liquidio_legacy_intr_handler,
1207 &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1209 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1210 pci_disable_msi(oct->pci_dev);
1211 dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1213 kfree(oct->irq_name_storage);
1214 oct->irq_name_storage = NULL;
/* Completion callback for the change-MTU soft command: record success or
 * failure in the wait context and wake the sleeping requester.
 */
1221 static void liquidio_change_mtu_completion(struct octeon_device *oct,
1222 u32 status, void *buf)
1224 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1225 struct liquidio_if_cfg_context *ctx;
1227 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1230 dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n",
1231 CVM_CAST64(status));
1232 WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL);
1234 WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS);
1237 /* This barrier is required to be sure that the response has been
1238 * written fully before waking up the handler
1242 wake_up_interruptible(&ctx->wc);
1246 * \brief Net device change_mtu
1247 * @param netdev network device
/* Net device .ndo_change_mtu implementation: send an
 * OCTNET_CMD_CHANGE_MTU soft command to the firmware, sleep until
 * liquidio_change_mtu_completion() signals the result (or the wait is
 * interrupted), and only then commit netdev->mtu.
 */
1249 int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
1251 struct lio *lio = GET_LIO(netdev);
1252 struct octeon_device *oct = lio->oct_dev;
1253 struct liquidio_if_cfg_context *ctx;
1254 struct octeon_soft_command *sc;
1255 union octnet_cmd *ncmd;
1259 ctx_size = sizeof(struct liquidio_if_cfg_context);
1260 sc = (struct octeon_soft_command *)
1261 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size);
1263 ncmd = (union octnet_cmd *)sc->virtdptr;
1264 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
/* Prepare the wait context the completion callback will signal. */
1266 WRITE_ONCE(ctx->cond, 0);
1267 ctx->octeon_id = lio_get_device_id(oct);
1268 init_waitqueue_head(&ctx->wc);
1271 ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
1272 ncmd->s.param1 = new_mtu;
/* Command words go to the device in its (swapped) byte order. */
1274 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1276 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1278 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1279 OPCODE_NIC_CMD, 0, 0, 0);
1281 sc->callback = liquidio_change_mtu_completion;
1282 sc->callback_arg = sc;
1283 sc->wait_time = 100;
1285 ret = octeon_send_soft_command(oct, sc);
1286 if (ret == IQ_SEND_FAILED) {
1287 netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
1290 /* Sleep on a wait queue till the cond flag indicates that the
1291 * response arrived or timed-out.
1293 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR ||
1294 ctx->cond == LIO_CHANGE_MTU_FAIL) {
1295 octeon_free_soft_command(oct, sc);
/* Firmware accepted the change: reflect it in the netdev. */
1299 netdev->mtu = new_mtu;
1302 octeon_free_soft_command(oct, sc);
/* Poll all active output queues until no packets remain pending, giving
 * up after ~100 iterations (sleeping one tick between checks).
 * Returns the residual pending-packet count (0 means fully drained).
 */
1306 int lio_wait_for_clean_oq(struct octeon_device *oct)
1308 int retry = 100, pending_pkts = 0;
1314 for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
/* Skip queues that were never brought up. */
1315 if (!(oct->io_qmask.oq & BIT_ULL(idx)))
1318 atomic_read(&oct->droq[idx]->pkts_pending);
1321 if (pending_pkts > 0)
1322 schedule_timeout_uninterruptible(1);
1324 } while (retry-- && pending_pkts);
1326 return pending_pkts;
/* octnet_nic_stats_callback - completion callback for the PORT_STATS request.
 * @oct_dev: device that issued the soft command
 * @status:  request status (OCTEON_REQUEST_TIMEOUT on timeout)
 * @ptr:     the octeon_soft_command that carried the request
 *
 * On success (no timeout and a zero firmware status) byte-swaps the
 * response stats block and copies each RX/TX counter into the device-wide
 * link_stats cache, then wakes the waiter (octnet_get_link_stats()) via
 * ctrl->complete.
 */
1330 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1331 u32 status, void *ptr)
1333 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1334 struct oct_nic_stats_resp *resp =
1335 (struct oct_nic_stats_resp *)sc->virtrptr;
1336 struct oct_nic_stats_ctrl *ctrl =
1337 (struct oct_nic_stats_ctrl *)sc->ctxptr;
1338 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1339 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1340 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1341 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
/* Only consume the response if the request completed and firmware
 * reported success; otherwise leave the cached stats untouched.
 */
1343 if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
/* Swap each 64-bit word of the stats block into host order. */
1344 octeon_swap_8B_data((u64 *)&resp->stats,
1345 (sizeof(struct oct_link_stats)) >> 3);
1347 /* RX link-level stats */
1348 rstats->total_rcvd = rsp_rstats->total_rcvd;
1349 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1350 rstats->total_bcst = rsp_rstats->total_bcst;
1351 rstats->total_mcst = rsp_rstats->total_mcst;
1352 rstats->runts = rsp_rstats->runts;
1353 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1354 /* Accounts for over/under-run of buffers */
1355 rstats->fifo_err = rsp_rstats->fifo_err;
1356 rstats->dmac_drop = rsp_rstats->dmac_drop;
1357 rstats->fcs_err = rsp_rstats->fcs_err;
1358 rstats->jabber_err = rsp_rstats->jabber_err;
1359 rstats->l2_err = rsp_rstats->l2_err;
1360 rstats->frame_err = rsp_rstats->frame_err;
1361 rstats->red_drops = rsp_rstats->red_drops;
1363 /* RX firmware stats */
1364 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1365 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1366 rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
1367 rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
1368 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1369 rstats->fw_err_link = rsp_rstats->fw_err_link;
1370 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1371 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1372 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1374 /* Number of packets that are LROed */
1375 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1376 /* Number of octets that are LROed */
1377 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1378 /* Number of LRO packets formed */
1379 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1380 /* Number of times LRO of packet aborted */
1381 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1382 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1383 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1384 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1385 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1386 /* intrmod: packet forward rate */
1387 rstats->fwd_rate = rsp_rstats->fwd_rate;
1389 /* TX link-level stats */
1390 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1391 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1392 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1393 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1394 tstats->ctl_sent = rsp_tstats->ctl_sent;
1395 /* Packets sent after one collision*/
1396 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1397 /* Packets sent after multiple collision*/
1398 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1399 /* Packets not sent due to max collisions */
1400 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1401 /* Packets not sent due to max deferrals */
1402 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1403 /* Accounts for over/under-run of buffers */
1404 tstats->fifo_err = rsp_tstats->fifo_err;
1405 tstats->runts = rsp_tstats->runts;
1406 /* Total number of collisions detected */
1407 tstats->total_collisions = rsp_tstats->total_collisions;
1409 /* firmware stats */
1410 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1411 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1412 tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
1413 tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
1414 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1415 tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1416 tstats->fw_err_link = rsp_tstats->fw_err_link;
1417 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1418 tstats->fw_tso = rsp_tstats->fw_tso;
1419 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1420 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1421 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
/* Wake octnet_get_link_stats(), which waits on this completion. */
1427 complete(&ctrl->complete);
/* octnet_get_link_stats - request link-level and firmware statistics.
 * @netdev: interface whose stats are requested
 *
 * Sends an OPCODE_NIC_PORT_STATS soft command and waits (up to 1 s) for
 * octnet_nic_stats_callback() to signal ctrl->complete; the callback copies
 * the counters into oct_dev->link_stats. A response status other than 1 is
 * treated as failure (return values on the elided error paths are not
 * visible here -- NOTE(review): confirm against the full source).
 */
1430 int octnet_get_link_stats(struct net_device *netdev)
1432 struct lio *lio = GET_LIO(netdev);
1433 struct octeon_device *oct_dev = lio->oct_dev;
1434 struct octeon_soft_command *sc;
1435 struct oct_nic_stats_ctrl *ctrl;
1436 struct oct_nic_stats_resp *resp;
1439 /* Alloc soft command */
1440 sc = (struct octeon_soft_command *)
1441 octeon_alloc_soft_command(oct_dev,
1443 sizeof(struct oct_nic_stats_resp),
1444 sizeof(struct octnic_ctrl_pkt));
/* Zero the response buffer and the per-request context. */
1449 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1450 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1452 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1453 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1454 ctrl->netdev = netdev;
1455 init_completion(&ctrl->complete);
/* Issue the request on the first TX queue of this interface. */
1457 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1459 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1460 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1462 sc->callback = octnet_nic_stats_callback;
1463 sc->callback_arg = sc;
1464 sc->wait_time = 500; /*in milli seconds*/
1466 retval = octeon_send_soft_command(oct_dev, sc);
1467 if (retval == IQ_SEND_FAILED) {
1468 octeon_free_soft_command(oct_dev, sc);
/* Block until the callback fires or the 1 s timeout elapses. */
1472 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1474 if (resp->status != 1) {
1475 octeon_free_soft_command(oct_dev, sc);
1480 octeon_free_soft_command(oct_dev, sc);
/* liquidio_nic_seapi_ctl_callback - completion callback for SEAPI
 * (OPCODE_NIC_UBOOT_CTL) soft commands issued by liquidio_set_speed() /
 * liquidio_get_speed(). Records the request status in the context and
 * wakes the waiter via ctx->complete.
 */
1485 static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
1489 struct liquidio_nic_seapi_ctl_context *ctx;
1490 struct octeon_soft_command *sc = buf;
/* Re-derive the device from the context's stored octeon id. */
1494 oct = lio_get_device(ctx->octeon_id);
1496 dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
1498 CVM_CAST64(status));
/* Publish the status before waking the sleeping requester. */
1500 ctx->status = status;
1501 complete(&ctx->complete);
/* liquidio_set_speed - set the link speed via firmware SEAPI.
 * @lio:   per-interface state
 * @speed: requested speed
 *
 * Only supported on CN23XX PF devices. Sends SEAPI_CMD_SPEED_SET as an
 * OPCODE_NIC_UBOOT_CTL soft command, waits up to 10 s for
 * liquidio_nic_seapi_ctl_callback() to complete it, and on success stores
 * the firmware-confirmed speed in oct->speed_setting. Error-path return
 * values are elided from this view -- NOTE(review): confirm against the
 * full source.
 */
1504 int liquidio_set_speed(struct lio *lio, int speed)
1506 struct liquidio_nic_seapi_ctl_context *ctx;
1507 struct octeon_device *oct = lio->oct_dev;
1508 struct oct_nic_seapi_resp *resp;
1509 struct octeon_soft_command *sc;
1510 union octnet_cmd *ncmd;
/* Nothing to do if the requested speed is already configured. */
1515 if (oct->speed_setting == speed)
/* SET SPEED is a PF-only operation on CN23XX. */
1518 if (!OCTEON_CN23XX_PF(oct)) {
1519 dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
1524 ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
1525 sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1526 sizeof(struct oct_nic_seapi_resp),
1531 ncmd = sc->virtdptr;
1533 resp = sc->virtrptr;
1534 memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1536 ctx->octeon_id = lio_get_device_id(oct);
1538 init_completion(&ctx->complete);
/* Build the SEAPI speed-set command; param1 carries the new speed. */
1541 ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
1542 ncmd->s.param1 = speed;
/* Swap the command's 64-bit words for the device. */
1544 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1546 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1548 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1549 OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1551 sc->callback = liquidio_nic_seapi_ctl_callback;
1552 sc->callback_arg = sc;
1553 sc->wait_time = 5000;
1555 retval = octeon_send_soft_command(oct, sc);
1556 if (retval == IQ_SEND_FAILED) {
1557 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1560 /* Wait for response or timeout */
1561 if (wait_for_completion_timeout(&ctx->complete,
1562 msecs_to_jiffies(10000)) == 0) {
1563 dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
1565 octeon_free_soft_command(oct, sc);
1569 retval = resp->status;
1572 dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
1574 octeon_free_soft_command(oct, sc);
/* Response speed field is big-endian 32-bit; convert to host order. */
1578 var = be32_to_cpu((__force __be32)resp->speed);
/* Firmware echoed a different speed than requested: report mismatch. */
1580 dev_err(&oct->pci_dev->dev,
1581 "%s: setting failed speed= %x, expect %x\n",
1582 __func__, var, speed);
/* Cache the speed the firmware actually applied. */
1585 oct->speed_setting = var;
1588 octeon_free_soft_command(oct, sc);
1593 int liquidio_get_speed(struct lio *lio)
1595 struct liquidio_nic_seapi_ctl_context *ctx;
1596 struct octeon_device *oct = lio->oct_dev;
1597 struct oct_nic_seapi_resp *resp;
1598 struct octeon_soft_command *sc;
1599 union octnet_cmd *ncmd;
1603 ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
1604 sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1605 sizeof(struct oct_nic_seapi_resp),
1610 ncmd = sc->virtdptr;
1612 resp = sc->virtrptr;
1613 memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1615 ctx->octeon_id = lio_get_device_id(oct);
1617 init_completion(&ctx->complete);
1620 ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
1622 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1624 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1626 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1627 OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1629 sc->callback = liquidio_nic_seapi_ctl_callback;
1630 sc->callback_arg = sc;
1631 sc->wait_time = 5000;
1633 retval = octeon_send_soft_command(oct, sc);
1634 if (retval == IQ_SEND_FAILED) {
1635 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1636 oct->no_speed_setting = 1;
1637 oct->speed_setting = 25;
1641 if (wait_for_completion_timeout(&ctx->complete,
1642 msecs_to_jiffies(10000)) == 0) {
1643 dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
1646 oct->speed_setting = 25;
1647 oct->no_speed_setting = 1;
1649 octeon_free_soft_command(oct, sc);
1653 retval = resp->status;
1655 dev_err(&oct->pci_dev->dev,
1656 "%s failed retval=%d\n", __func__, retval);
1657 oct->no_speed_setting = 1;
1658 oct->speed_setting = 25;
1659 octeon_free_soft_command(oct, sc);
1664 var = be32_to_cpu((__force __be32)resp->speed);
1665 oct->speed_setting = var;
1666 if (var == 0xffff) {
1667 oct->no_speed_setting = 1;
1668 /* unable to access boot variables
1669 * get the default value based on the NIC type
1671 oct->speed_setting = 25;
1676 octeon_free_soft_command(oct, sc);