1 /**************************************************************************/
3 /* IBM System i and System p Virtual NIC Device Driver */
4 /* Copyright (C) 2014 IBM Corp. */
5 /* Santiago Leon (santi_leon@yahoo.com) */
6 /* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7 /* John Allen (jallen@linux.vnet.ibm.com) */
9 /* This program is free software; you can redistribute it and/or modify */
10 /* it under the terms of the GNU General Public License as published by */
11 /* the Free Software Foundation; either version 2 of the License, or */
12 /* (at your option) any later version. */
14 /* This program is distributed in the hope that it will be useful, */
15 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17 /* GNU General Public License for more details. */
19 /* You should have received a copy of the GNU General Public License */
20 /* along with this program. */
22 /* This module contains the implementation of a virtual ethernet device */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24 /* option of the RS/6000 Platform Architecture to interface with virtual */
25 /* ethernet NICs that are presented to the partition by the hypervisor. */
27 /* Messages are passed between the VNIC driver and the VNIC server using */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31 /* are used by the driver to notify the server that a packet is */
32 /* ready for transmission or that a buffer has been added to receive a */
33 /* packet. Subsequently, sCRQs are used by the server to notify the */
34 /* driver that a packet transmission has been completed or that a packet */
35 /* has been received and placed in a waiting buffer. */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit */
39 /* or receive has been completed, the VNIC driver is required to use */
40 /* "long term mapping". This entails that large, continuous DMA mapped */
41 /* buffers are allocated on driver initialization and these buffers are */
42 /* then continuously reused to pass skbs to and from the VNIC server. */
44 /**************************************************************************/
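/* Editor's illustrative sketch (not driver code): the receive-side round  */
/* trip described above, in pseudo-C with hypothetical values:             */
/*                                                                         */
/*   driver:  skb = alloc_skb(...); pick a slot in the long term buffer;   */
/*            fill sub_crq.rx_add (ioba, len, correlator); send_subcrq()   */
/*   server:  DMAs a received frame into that slot; posts an rx_comp       */
/*            entry on the sub-CRQ                                         */
/*   driver:  ibmvnic_poll() copies the frame into the skb and returns     */
/*            the slot to the pool's free_map                              */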
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
64 #include <linux/ipv6.h>
65 #include <linux/irq.h>
66 #include <linux/kthread.h>
67 #include <linux/seq_file.h>
68 #include <linux/interrupt.h>
69 #include <net/net_namespace.h>
70 #include <asm/hvcall.h>
71 #include <linux/atomic.h>
73 #include <asm/iommu.h>
74 #include <linux/uaccess.h>
75 #include <asm/firmware.h>
76 #include <linux/workqueue.h>
77 #include <linux/if_vlan.h>
79 #include "ibmvnic.h"
81 static const char ibmvnic_driver_name[] = "ibmvnic";
82 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
84 MODULE_AUTHOR("Santiago Leon");
85 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
89 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90 static int ibmvnic_remove(struct vio_dev *);
91 static void release_sub_crqs(struct ibmvnic_adapter *);
92 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
93 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
94 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
96 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
97 union sub_crq *sub_crq);
98 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
99 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
100 static int enable_scrq_irq(struct ibmvnic_adapter *,
101 struct ibmvnic_sub_crq_queue *);
102 static int disable_scrq_irq(struct ibmvnic_adapter *,
103 struct ibmvnic_sub_crq_queue *);
104 static int pending_scrq(struct ibmvnic_adapter *,
105 struct ibmvnic_sub_crq_queue *);
106 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
107 struct ibmvnic_sub_crq_queue *);
108 static int ibmvnic_poll(struct napi_struct *napi, int budget);
109 static void send_map_query(struct ibmvnic_adapter *adapter);
110 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111 static void send_request_unmap(struct ibmvnic_adapter *, u8);
112 static void send_login(struct ibmvnic_adapter *adapter);
113 static void send_cap_queries(struct ibmvnic_adapter *adapter);
114 static int init_sub_crqs(struct ibmvnic_adapter *);
115 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
116 static int ibmvnic_init(struct ibmvnic_adapter *);
117 static void release_crq_queue(struct ibmvnic_adapter *);
119 struct ibmvnic_stat {
120 char name[ETH_GSTRING_LEN];
121 int offset;
122 };
124 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
125 offsetof(struct ibmvnic_statistics, stat))
126 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
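/* Example (editor's note): for an adapter pointer "a",
 *
 *	IBMVNIC_GET_STAT(a, IBMVNIC_STAT_OFF(rx_packets))
 *
 * computes the byte offset of stats.rx_packets within struct
 * ibmvnic_adapter and reads the counter back as a u64, which is how
 * ibmvnic_get_ethtool_stats() below walks ibmvnic_stats[].
 */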
128 static const struct ibmvnic_stat ibmvnic_stats[] = {
129 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
130 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
131 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
132 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
133 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
134 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
135 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
136 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
137 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
138 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
139 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
140 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
141 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
142 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
143 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
144 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
145 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
146 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
147 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
148 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
149 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
150 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
153 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
154 unsigned long length, unsigned long *number,
155 unsigned long *irq)
157 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
160 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
161 *number = retbuf[0];
162 *irq = retbuf[1];
163 return rc;
167 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
168 struct ibmvnic_long_term_buff *ltb, int size)
170 struct device *dev = &adapter->vdev->dev;
173 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
174 GFP_KERNEL);
176 if (!ltb->buff) {
177 dev_err(dev, "Couldn't alloc long term buffer\n");
180 ltb->map_id = adapter->map_id;
183 init_completion(&adapter->fw_done);
184 send_request_map(adapter, ltb->addr,
185 ltb->size, ltb->map_id);
186 wait_for_completion(&adapter->fw_done);
188 if (adapter->fw_done_rc) {
189 dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
190 adapter->fw_done_rc);
196 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
197 struct ibmvnic_long_term_buff *ltb)
199 struct device *dev = &adapter->vdev->dev;
204 /* VIOS automatically unmaps the long term buffer at remote
205 * end for the following resets:
206 * FAILOVER, MOBILITY, TIMEOUT.
208 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
209 adapter->reset_reason != VNIC_RESET_MOBILITY &&
210 adapter->reset_reason != VNIC_RESET_TIMEOUT)
211 send_request_unmap(adapter, ltb->map_id);
212 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
215 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
216 struct ibmvnic_long_term_buff *ltb)
218 memset(ltb->buff, 0, ltb->size);
220 init_completion(&adapter->fw_done);
221 send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
222 wait_for_completion(&adapter->fw_done);
224 if (adapter->fw_done_rc) {
225 dev_info(&adapter->vdev->dev,
226 "Reset failed, attempting to free and reallocate buffer\n");
227 free_long_term_buff(adapter, ltb);
228 return alloc_long_term_buff(adapter, ltb, ltb->size);
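/* Editor's illustration (hypothetical helper, not part of the driver):
 * the intended lifecycle of a long term buffer using the helpers above.
 */
static int __maybe_unused example_ltb_lifecycle(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_long_term_buff ltb = { 0 };
	int rc;

	/* one large allocation, DMA mapped and registered with the server */
	rc = alloc_long_term_buff(adapter, &ltb, 16 * PAGE_SIZE);
	if (rc)
		return rc;

	/* ...slots inside ltb.buff are then reused for many skbs... */

	/* zero and re-register with the server after a reset */
	rc = reset_long_term_buff(adapter, &ltb);

	/* unregister (unless VIOS already did) and free */
	free_long_term_buff(adapter, &ltb);
	return rc;
}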
233 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
237 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
238 i++)
239 adapter->rx_pool[i].active = 0;
242 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
243 struct ibmvnic_rx_pool *pool)
245 int count = pool->size - atomic_read(&pool->available);
246 struct device *dev = &adapter->vdev->dev;
247 int buffers_added = 0;
248 unsigned long lpar_rc;
249 union sub_crq sub_crq;
262 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
263 be32_to_cpu(adapter->login_rsp_buf->
264 off_rxadd_subcrqs));
266 for (i = 0; i < count; ++i) {
267 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
268 if (!skb) {
269 dev_err(dev, "Couldn't replenish rx buff\n");
270 adapter->replenish_no_mem++;
271 break;
272 }
274 index = pool->free_map[pool->next_free];
276 if (pool->rx_buff[index].skb)
277 dev_err(dev, "Inconsistent free_map!\n");
279 /* Copy the skb to the long term mapped DMA buffer */
280 offset = index * pool->buff_size;
281 dst = pool->long_term_buff.buff + offset;
282 memset(dst, 0, pool->buff_size);
283 dma_addr = pool->long_term_buff.addr + offset;
284 pool->rx_buff[index].data = dst;
286 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
287 pool->rx_buff[index].dma = dma_addr;
288 pool->rx_buff[index].skb = skb;
289 pool->rx_buff[index].pool_index = pool->index;
290 pool->rx_buff[index].size = pool->buff_size;
292 memset(&sub_crq, 0, sizeof(sub_crq));
293 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
294 sub_crq.rx_add.correlator =
295 cpu_to_be64((u64)&pool->rx_buff[index]);
296 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
297 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
299 /* The length field of the sCRQ is defined to be 24 bits so the
300 * buffer size needs to be left shifted by a byte before it is
301 * converted to big endian to prevent the last byte from being
302 * parsed.
303 */
304 #ifdef __LITTLE_ENDIAN__
305 shift = 8;
306 #endif
307 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
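/* Editor's worked example: for a 4KB buffer on a little endian host,
 * 0x1000 << 8 == 0x00100000, so cpu_to_be32() stores bytes 00 10 00 00
 * and the 24 bit length 0x001000 lands in the first three bytes.
 */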
309 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
310 &sub_crq);
311 if (lpar_rc != H_SUCCESS)
312 goto failure;
315 adapter->replenish_add_buff_success++;
316 pool->next_free = (pool->next_free + 1) % pool->size;
318 atomic_add(buffers_added, &pool->available);
321 failure:
322 dev_info(dev, "replenish pools failure\n");
323 pool->free_map[pool->next_free] = index;
324 pool->rx_buff[index].skb = NULL;
325 if (!dma_mapping_error(dev, dma_addr))
326 dma_unmap_single(dev, dma_addr, pool->buff_size,
327 DMA_FROM_DEVICE);
329 dev_kfree_skb_any(skb);
330 adapter->replenish_add_buff_failure++;
331 atomic_add(buffers_added, &pool->available);
333 if (lpar_rc == H_CLOSED) {
334 /* Disable buffer pool replenishment and report carrier off if
335 * queue is closed. Firmware guarantees that a signal will
336 * be sent to the driver, triggering a reset.
338 deactivate_rx_pools(adapter);
339 netif_carrier_off(adapter->netdev);
343 static void replenish_pools(struct ibmvnic_adapter *adapter)
347 adapter->replenish_task_cycles++;
348 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
349 i++)
350 if (adapter->rx_pool[i].active)
351 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
355 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
357 kfree(adapter->tx_stats_buffers);
358 kfree(adapter->rx_stats_buffers);
361 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
363 adapter->tx_stats_buffers =
364 kcalloc(adapter->req_tx_queues,
365 sizeof(struct ibmvnic_tx_queue_stats),
367 if (!adapter->tx_stats_buffers)
368 return -ENOMEM;
370 adapter->rx_stats_buffers =
371 kcalloc(adapter->req_rx_queues,
372 sizeof(struct ibmvnic_rx_queue_stats),
374 if (!adapter->rx_stats_buffers)
375 return -ENOMEM;
380 static void release_stats_token(struct ibmvnic_adapter *adapter)
382 struct device *dev = &adapter->vdev->dev;
384 if (!adapter->stats_token)
385 return;
387 dma_unmap_single(dev, adapter->stats_token,
388 sizeof(struct ibmvnic_statistics),
390 adapter->stats_token = 0;
393 static int init_stats_token(struct ibmvnic_adapter *adapter)
395 struct device *dev = &adapter->vdev->dev;
398 stok = dma_map_single(dev, &adapter->stats,
399 sizeof(struct ibmvnic_statistics),
401 if (dma_mapping_error(dev, stok)) {
402 dev_err(dev, "Couldn't map stats buffer\n");
403 return -1;
404 }
406 adapter->stats_token = stok;
407 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
411 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
413 struct ibmvnic_rx_pool *rx_pool;
417 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
418 for (i = 0; i < rx_scrqs; i++) {
419 rx_pool = &adapter->rx_pool[i];
421 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
423 rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
424 if (rc)
425 return rc;
427 for (j = 0; j < rx_pool->size; j++)
428 rx_pool->free_map[j] = j;
430 memset(rx_pool->rx_buff, 0,
431 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
433 atomic_set(&rx_pool->available, 0);
434 rx_pool->next_alloc = 0;
435 rx_pool->next_free = 0;
442 static void release_rx_pools(struct ibmvnic_adapter *adapter)
444 struct ibmvnic_rx_pool *rx_pool;
448 if (!adapter->rx_pool)
449 return;
451 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
452 for (i = 0; i < rx_scrqs; i++) {
453 rx_pool = &adapter->rx_pool[i];
455 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
457 kfree(rx_pool->free_map);
458 free_long_term_buff(adapter, &rx_pool->long_term_buff);
460 if (!rx_pool->rx_buff)
461 continue;
463 for (j = 0; j < rx_pool->size; j++) {
464 if (rx_pool->rx_buff[j].skb) {
465 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
466 rx_pool->rx_buff[j].skb = NULL;
470 kfree(rx_pool->rx_buff);
473 kfree(adapter->rx_pool);
474 adapter->rx_pool = NULL;
477 static int init_rx_pools(struct net_device *netdev)
479 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
480 struct device *dev = &adapter->vdev->dev;
481 struct ibmvnic_rx_pool *rx_pool;
486 rxadd_subcrqs =
487 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
488 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
489 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
491 adapter->rx_pool = kcalloc(rxadd_subcrqs,
492 sizeof(struct ibmvnic_rx_pool),
494 if (!adapter->rx_pool) {
495 dev_err(dev, "Failed to allocate rx pools\n");
499 for (i = 0; i < rxadd_subcrqs; i++) {
500 rx_pool = &adapter->rx_pool[i];
502 netdev_dbg(adapter->netdev,
503 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
504 i, adapter->req_rx_add_entries_per_subcrq,
505 be64_to_cpu(size_array[i]));
507 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
509 rx_pool->buff_size = be64_to_cpu(size_array[i]);
512 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
514 if (!rx_pool->free_map) {
515 release_rx_pools(adapter);
519 rx_pool->rx_buff = kcalloc(rx_pool->size,
520 sizeof(struct ibmvnic_rx_buff),
522 if (!rx_pool->rx_buff) {
523 dev_err(dev, "Couldn't alloc rx buffers\n");
524 release_rx_pools(adapter);
528 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
529 rx_pool->size * rx_pool->buff_size)) {
530 release_rx_pools(adapter);
534 for (j = 0; j < rx_pool->size; ++j)
535 rx_pool->free_map[j] = j;
537 atomic_set(&rx_pool->available, 0);
538 rx_pool->next_alloc = 0;
539 rx_pool->next_free = 0;
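/* Editor's worked example: with req_rx_add_entries_per_subcrq == 128 and
 * a server advertised buff_size of 2048, each pool's long term buffer is
 * 128 * 2048 bytes and slot i sits at byte offset i * 2048, matching the
 * offset math in replenish_rx_pool() above.
 */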
545 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
547 struct ibmvnic_tx_pool *tx_pool;
551 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
552 for (i = 0; i < tx_scrqs; i++) {
553 netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);
555 tx_pool = &adapter->tx_pool[i];
557 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
558 if (rc)
559 return rc;
561 memset(tx_pool->tx_buff, 0,
562 adapter->req_tx_entries_per_subcrq *
563 sizeof(struct ibmvnic_tx_buff));
565 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
566 tx_pool->free_map[j] = j;
568 tx_pool->consumer_index = 0;
569 tx_pool->producer_index = 0;
575 static void release_tx_pools(struct ibmvnic_adapter *adapter)
577 struct ibmvnic_tx_pool *tx_pool;
580 if (!adapter->tx_pool)
581 return;
583 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
584 for (i = 0; i < tx_scrqs; i++) {
585 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
586 tx_pool = &adapter->tx_pool[i];
587 kfree(tx_pool->tx_buff);
588 free_long_term_buff(adapter, &tx_pool->long_term_buff);
589 kfree(tx_pool->free_map);
592 kfree(adapter->tx_pool);
593 adapter->tx_pool = NULL;
596 static int init_tx_pools(struct net_device *netdev)
598 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
599 struct device *dev = &adapter->vdev->dev;
600 struct ibmvnic_tx_pool *tx_pool;
604 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
605 adapter->tx_pool = kcalloc(tx_subcrqs,
606 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
607 if (!adapter->tx_pool)
608 return -ENOMEM;
610 for (i = 0; i < tx_subcrqs; i++) {
611 tx_pool = &adapter->tx_pool[i];
613 netdev_dbg(adapter->netdev,
614 "Initializing tx_pool[%d], %lld buffs\n",
615 i, adapter->req_tx_entries_per_subcrq);
617 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
618 sizeof(struct ibmvnic_tx_buff),
620 if (!tx_pool->tx_buff) {
621 dev_err(dev, "tx pool buffer allocation failed\n");
622 release_tx_pools(adapter);
626 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
627 adapter->req_tx_entries_per_subcrq *
628 adapter->req_mtu)) {
629 release_tx_pools(adapter);
633 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
634 sizeof(int), GFP_KERNEL);
635 if (!tx_pool->free_map) {
636 release_tx_pools(adapter);
640 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
641 tx_pool->free_map[j] = j;
643 tx_pool->consumer_index = 0;
644 tx_pool->producer_index = 0;
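/* Editor's note: free_map behaves as a ring of free slot indices.
 * ibmvnic_xmit() takes free_map[consumer_index] and advances it, while
 * ibmvnic_complete_tx() returns finished slots at free_map[producer_index].
 * E.g. with 4 entries, taking slots 0, 1 and 2 leaves consumer_index == 3,
 * and completing correlator 1 stores 1 back at producer_index 0.
 */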
650 static void release_error_buffers(struct ibmvnic_adapter *adapter)
652 struct device *dev = &adapter->vdev->dev;
653 struct ibmvnic_error_buff *error_buff, *tmp;
656 spin_lock_irqsave(&adapter->error_list_lock, flags);
657 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
658 list_del(&error_buff->list);
659 dma_unmap_single(dev, error_buff->dma, error_buff->len,
660 DMA_FROM_DEVICE);
661 kfree(error_buff->buff);
662 kfree(error_buff);
664 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
667 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
671 if (adapter->napi_enabled)
672 return;
674 for (i = 0; i < adapter->req_rx_queues; i++)
675 napi_enable(&adapter->napi[i]);
677 adapter->napi_enabled = true;
680 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
684 if (!adapter->napi_enabled)
685 return;
687 for (i = 0; i < adapter->req_rx_queues; i++) {
688 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
689 napi_disable(&adapter->napi[i]);
692 adapter->napi_enabled = false;
695 static int ibmvnic_login(struct net_device *netdev)
697 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
698 unsigned long timeout = msecs_to_jiffies(30000);
699 struct device *dev = &adapter->vdev->dev;
703 if (adapter->renegotiate) {
704 adapter->renegotiate = false;
705 release_sub_crqs(adapter);
707 reinit_completion(&adapter->init_done);
708 send_cap_queries(adapter);
709 if (!wait_for_completion_timeout(&adapter->init_done,
711 dev_err(dev, "Capabilities query timeout\n");
714 rc = init_sub_crqs(adapter);
717 "Initialization of SCRQ's failed\n");
720 rc = init_sub_crq_irqs(adapter);
723 "Initialization of SCRQ's irqs failed\n");
728 reinit_completion(&adapter->init_done);
730 if (!wait_for_completion_timeout(&adapter->init_done,
732 dev_err(dev, "Login timeout\n");
735 } while (adapter->renegotiate);
740 static void release_resources(struct ibmvnic_adapter *adapter)
744 release_tx_pools(adapter);
745 release_rx_pools(adapter);
747 release_stats_token(adapter);
748 release_stats_buffers(adapter);
749 release_error_buffers(adapter);
752 if (adapter->napi) {
753 for (i = 0; i < adapter->req_rx_queues; i++) {
754 netdev_dbg(adapter->netdev,
755 "Releasing napi[%d]\n", i);
756 netif_napi_del(&adapter->napi[i]);
762 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
764 struct net_device *netdev = adapter->netdev;
765 unsigned long timeout = msecs_to_jiffies(30000);
766 union ibmvnic_crq crq;
770 netdev_dbg(netdev, "setting link state %d\n", link_state);
772 memset(&crq, 0, sizeof(crq));
773 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
774 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
775 crq.logical_link_state.link_state = link_state;
780 reinit_completion(&adapter->init_done);
781 rc = ibmvnic_send_crq(adapter, &crq);
782 if (rc) {
783 netdev_err(netdev, "Failed to set link state\n");
787 if (!wait_for_completion_timeout(&adapter->init_done,
789 netdev_err(netdev, "timeout setting link state\n");
793 if (adapter->init_done_rc == 1) {
794 /* Partial success, delay and re-send */
803 static int set_real_num_queues(struct net_device *netdev)
805 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
808 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
809 adapter->req_tx_queues, adapter->req_rx_queues);
811 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
812 if (rc) {
813 netdev_err(netdev, "failed to set the number of tx queues\n");
817 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
818 if (rc) {
819 netdev_err(netdev, "failed to set the number of rx queues\n");
824 static int init_resources(struct ibmvnic_adapter *adapter)
826 struct net_device *netdev = adapter->netdev;
829 rc = set_real_num_queues(netdev);
833 rc = init_stats_buffers(adapter);
837 rc = init_stats_token(adapter);
842 adapter->napi = kcalloc(adapter->req_rx_queues,
843 sizeof(struct napi_struct), GFP_KERNEL);
847 for (i = 0; i < adapter->req_rx_queues; i++) {
848 netdev_dbg(netdev, "Adding napi[%d]\n", i);
849 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
853 send_map_query(adapter);
855 rc = init_rx_pools(netdev);
859 rc = init_tx_pools(netdev);
863 static int __ibmvnic_open(struct net_device *netdev)
865 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
866 enum vnic_state prev_state = adapter->state;
869 adapter->state = VNIC_OPENING;
870 replenish_pools(adapter);
871 ibmvnic_napi_enable(adapter);
873 /* We're ready to receive frames, enable the sub-crq interrupts and
874 * set the logical link state to up
876 for (i = 0; i < adapter->req_rx_queues; i++) {
877 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
878 if (prev_state == VNIC_CLOSED)
879 enable_irq(adapter->rx_scrq[i]->irq);
880 else
881 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
884 for (i = 0; i < adapter->req_tx_queues; i++) {
885 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
886 if (prev_state == VNIC_CLOSED)
887 enable_irq(adapter->tx_scrq[i]->irq);
888 else
889 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
892 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
893 if (rc) {
894 ibmvnic_napi_disable(adapter);
895 release_resources(adapter);
899 netif_tx_start_all_queues(netdev);
901 if (prev_state == VNIC_CLOSED) {
902 for (i = 0; i < adapter->req_rx_queues; i++)
903 napi_schedule(&adapter->napi[i]);
906 adapter->state = VNIC_OPEN;
910 static int ibmvnic_open(struct net_device *netdev)
912 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
915 mutex_lock(&adapter->reset_lock);
917 if (adapter->state != VNIC_CLOSED) {
918 rc = ibmvnic_login(netdev);
919 if (rc) {
920 mutex_unlock(&adapter->reset_lock);
924 rc = init_resources(adapter);
926 netdev_err(netdev, "failed to initialize resources\n");
927 release_resources(adapter);
928 mutex_unlock(&adapter->reset_lock);
933 rc = __ibmvnic_open(netdev);
934 netif_carrier_on(netdev);
935 mutex_unlock(&adapter->reset_lock);
940 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
942 struct ibmvnic_rx_pool *rx_pool;
947 if (!adapter->rx_pool)
948 return;
950 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
951 rx_entries = adapter->req_rx_add_entries_per_subcrq;
953 /* Free any remaining skbs in the rx buffer pools */
954 for (i = 0; i < rx_scrqs; i++) {
955 rx_pool = &adapter->rx_pool[i];
959 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
960 for (j = 0; j < rx_entries; j++) {
961 if (rx_pool->rx_buff[j].skb) {
962 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
963 rx_pool->rx_buff[j].skb = NULL;
969 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
971 struct ibmvnic_tx_pool *tx_pool;
976 if (!adapter->tx_pool)
977 return;
979 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
980 tx_entries = adapter->req_tx_entries_per_subcrq;
982 /* Free any remaining skbs in the tx buffer pools */
983 for (i = 0; i < tx_scrqs; i++) {
984 tx_pool = &adapter->tx_pool[i];
988 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
989 for (j = 0; j < tx_entries; j++) {
990 if (tx_pool->tx_buff[j].skb) {
991 dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
992 tx_pool->tx_buff[j].skb = NULL;
998 static int __ibmvnic_close(struct net_device *netdev)
1000 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1004 adapter->state = VNIC_CLOSING;
1006 /* ensure that transmissions are stopped if called by do_reset */
1007 if (adapter->resetting)
1008 netif_tx_disable(netdev);
1009 else
1010 netif_tx_stop_all_queues(netdev);
1012 ibmvnic_napi_disable(adapter);
1014 if (adapter->tx_scrq) {
1015 for (i = 0; i < adapter->req_tx_queues; i++)
1016 if (adapter->tx_scrq[i]->irq) {
1017 netdev_dbg(adapter->netdev,
1018 "Disabling tx_scrq[%d] irq\n", i);
1019 disable_irq(adapter->tx_scrq[i]->irq);
1023 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1027 if (adapter->rx_scrq) {
1028 for (i = 0; i < adapter->req_rx_queues; i++) {
1031 while (pending_scrq(adapter, adapter->rx_scrq[i])) {
1039 if (adapter->rx_scrq[i]->irq) {
1040 netdev_dbg(adapter->netdev,
1041 "Disabling rx_scrq[%d] irq\n", i);
1042 disable_irq(adapter->rx_scrq[i]->irq);
1046 clean_rx_pools(adapter);
1047 clean_tx_pools(adapter);
1048 adapter->state = VNIC_CLOSED;
1052 static int ibmvnic_close(struct net_device *netdev)
1054 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1057 mutex_lock(&adapter->reset_lock);
1058 rc = __ibmvnic_close(netdev);
1059 mutex_unlock(&adapter->reset_lock);
1065 * build_hdr_data - creates L2/L3/L4 header data buffer
1066 * @hdr_field - bitfield determining needed headers
1067 * @skb - socket buffer
1068 * @hdr_len - array of header lengths
1069 * @tot_len - total length of data
1071 * Reads hdr_field to determine which headers are needed by firmware.
1072 * Builds a buffer containing these headers. Saves individual header
1073 * lengths and total buffer length to be used to build descriptors.
1075 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1076 int *hdr_len, u8 *hdr_data)
1081 hdr_len[0] = sizeof(struct ethhdr);
1083 if (skb->protocol == htons(ETH_P_IP)) {
1084 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1085 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1086 hdr_len[2] = tcp_hdrlen(skb);
1087 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1088 hdr_len[2] = sizeof(struct udphdr);
1089 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1090 hdr_len[1] = sizeof(struct ipv6hdr);
1091 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1092 hdr_len[2] = tcp_hdrlen(skb);
1093 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1094 hdr_len[2] = sizeof(struct udphdr);
1097 memset(hdr_data, 0, 120);
1098 if ((hdr_field >> 6) & 1) {
1099 hdr = skb_mac_header(skb);
1100 memcpy(hdr_data, hdr, hdr_len[0]);
1104 if ((hdr_field >> 5) & 1) {
1105 hdr = skb_network_header(skb);
1106 memcpy(hdr_data + len, hdr, hdr_len[1]);
1110 if ((hdr_field >> 4) & 1) {
1111 hdr = skb_transport_header(skb);
1112 memcpy(hdr_data + len, hdr, hdr_len[2]);
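/* Editor's worked example: bits 6, 5 and 4 of hdr_field select the L2, L3
 * and L4 headers respectively. For a TCP/IPv4 frame with all three set,
 * hdr_len becomes {14, 20, 20} and 54 bytes are copied into hdr_data.
 */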
1119 * create_hdr_descs - create header and header extension descriptors
1120 * @hdr_field - bitfield determining needed headers
1121 * @data - buffer containing header data
1122 * @len - length of data buffer
1123 * @hdr_len - array of individual header lengths
1124 * @scrq_arr - descriptor array
1126 * Creates header and, if needed, header extension descriptors and
1127 * places them in a descriptor array, scrq_arr
1130 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1131 union sub_crq *scrq_arr)
1133 union sub_crq hdr_desc;
1139 while (tmp_len > 0) {
1140 cur = hdr_data + len - tmp_len;
1142 memset(&hdr_desc, 0, sizeof(hdr_desc));
1143 if (cur != hdr_data) {
1144 data = hdr_desc.hdr_ext.data;
1145 tmp = tmp_len > 29 ? 29 : tmp_len;
1146 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1147 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1148 hdr_desc.hdr_ext.len = tmp;
1150 data = hdr_desc.hdr.data;
1151 tmp = tmp_len > 24 ? 24 : tmp_len;
1152 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1153 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1154 hdr_desc.hdr.len = tmp;
1155 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1156 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1157 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1158 hdr_desc.hdr.flag = hdr_field << 1;
1160 memcpy(data, cur, tmp);
1162 *scrq_arr = hdr_desc;
1171 * build_hdr_descs_arr - build a header descriptor array
1172 * @skb - socket buffer
1173 * @num_entries - number of descriptors to be sent
1174 * @subcrq - first TX descriptor
1175 * @hdr_field - bit field determining which headers will be sent
1177 * This function will build a TX descriptor array with applicable
1178 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1181 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1182 int *num_entries, u8 hdr_field)
1184 int hdr_len[3] = {0, 0, 0};
1186 u8 *hdr_data = txbuff->hdr_data;
1188 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1190 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1191 txbuff->indir_arr + 1);
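/* Editor's worked example: a 54 byte header block does not fit the 24 byte
 * hdr_desc payload, so create_hdr_descs() emits one hdr_desc carrying
 * 24 bytes plus two hdr_ext descriptors carrying 29 and 1 bytes, growing
 * num_entries by 3.
 */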
1194 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1196 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1197 int queue_num = skb_get_queue_mapping(skb);
1198 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1199 struct device *dev = &adapter->vdev->dev;
1200 struct ibmvnic_tx_buff *tx_buff = NULL;
1201 struct ibmvnic_sub_crq_queue *tx_scrq;
1202 struct ibmvnic_tx_pool *tx_pool;
1203 unsigned int tx_send_failed = 0;
1204 unsigned int tx_map_failed = 0;
1205 unsigned int tx_dropped = 0;
1206 unsigned int tx_packets = 0;
1207 unsigned int tx_bytes = 0;
1208 dma_addr_t data_dma_addr;
1209 struct netdev_queue *txq;
1210 unsigned long lpar_rc;
1211 union sub_crq tx_crq;
1212 unsigned int offset;
1213 int num_entries = 1;
1219 if (adapter->resetting) {
1220 if (!netif_subqueue_stopped(netdev, skb))
1221 netif_stop_subqueue(netdev, queue_num);
1222 dev_kfree_skb_any(skb);
1230 tx_pool = &adapter->tx_pool[queue_num];
1231 tx_scrq = adapter->tx_scrq[queue_num];
1232 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1233 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1234 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1236 index = tx_pool->free_map[tx_pool->consumer_index];
1237 offset = index * adapter->req_mtu;
1238 dst = tx_pool->long_term_buff.buff + offset;
1239 memset(dst, 0, adapter->req_mtu);
1240 skb_copy_from_linear_data(skb, dst, skb->len);
1241 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1243 tx_pool->consumer_index =
1244 (tx_pool->consumer_index + 1) %
1245 adapter->req_tx_entries_per_subcrq;
1247 tx_buff = &tx_pool->tx_buff[index];
1249 tx_buff->data_dma[0] = data_dma_addr;
1250 tx_buff->data_len[0] = skb->len;
1251 tx_buff->index = index;
1252 tx_buff->pool_index = queue_num;
1253 tx_buff->last_frag = true;
1255 memset(&tx_crq, 0, sizeof(tx_crq));
1256 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1257 tx_crq.v1.type = IBMVNIC_TX_DESC;
1258 tx_crq.v1.n_crq_elem = 1;
1259 tx_crq.v1.n_sge = 1;
1260 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1261 tx_crq.v1.correlator = cpu_to_be32(index);
1262 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1263 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1264 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1266 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1267 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1268 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1271 if (skb->protocol == htons(ETH_P_IP)) {
1272 if (ip_hdr(skb)->version == 4)
1273 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1274 else if (ip_hdr(skb)->version == 6)
1275 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1277 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1278 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1279 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1280 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1283 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1284 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1287 /* determine if l2/3/4 headers are sent to firmware */
1288 if ((*hdrs >> 7) & 1 &&
1289 (skb->protocol == htons(ETH_P_IP) ||
1290 skb->protocol == htons(ETH_P_IPV6))) {
1291 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1292 tx_crq.v1.n_crq_elem = num_entries;
1293 tx_buff->indir_arr[0] = tx_crq;
1294 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1295 sizeof(tx_buff->indir_arr),
1297 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1298 dev_kfree_skb_any(skb);
1299 tx_buff->skb = NULL;
1300 if (!firmware_has_feature(FW_FEATURE_CMO))
1301 dev_err(dev, "tx: unable to map descriptor array\n");
1307 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1308 (u64)tx_buff->indir_dma,
1311 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1314 if (lpar_rc != H_SUCCESS) {
1315 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1317 if (tx_pool->consumer_index == 0)
1318 tx_pool->consumer_index =
1319 adapter->req_tx_entries_per_subcrq - 1;
1320 else
1321 tx_pool->consumer_index--;
1323 dev_kfree_skb_any(skb);
1324 tx_buff->skb = NULL;
1326 if (lpar_rc == H_CLOSED) {
1327 /* Disable TX and report carrier off if queue is closed.
1328 * Firmware guarantees that a signal will be sent to the
1329 * driver, triggering a reset or some other action.
1331 netif_tx_stop_all_queues(netdev);
1332 netif_carrier_off(netdev);
1341 if (atomic_inc_return(&tx_scrq->used)
1342 >= adapter->req_tx_entries_per_subcrq) {
1343 netdev_info(netdev, "Stopping queue %d\n", queue_num);
1344 netif_stop_subqueue(netdev, queue_num);
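/* Editor's note: the matching wake-up happens in ibmvnic_complete_tx()
 * once used drops to half of req_tx_entries_per_subcrq, giving the queue
 * hysteresis instead of toggling on every completion.
 */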
1348 tx_bytes += skb->len;
1349 txq->trans_start = jiffies;
1353 netdev->stats.tx_dropped += tx_dropped;
1354 netdev->stats.tx_bytes += tx_bytes;
1355 netdev->stats.tx_packets += tx_packets;
1356 adapter->tx_send_failed += tx_send_failed;
1357 adapter->tx_map_failed += tx_map_failed;
1358 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1359 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1360 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1365 static void ibmvnic_set_multi(struct net_device *netdev)
1367 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1368 struct netdev_hw_addr *ha;
1369 union ibmvnic_crq crq;
1371 memset(&crq, 0, sizeof(crq));
1372 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1373 crq.request_capability.cmd = REQUEST_CAPABILITY;
1375 if (netdev->flags & IFF_PROMISC) {
1376 if (!adapter->promisc_supported)
1377 return;
1378 } else {
1379 if (netdev->flags & IFF_ALLMULTI) {
1380 /* Accept all multicast */
1381 memset(&crq, 0, sizeof(crq));
1382 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1383 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1384 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1385 ibmvnic_send_crq(adapter, &crq);
1386 } else if (netdev_mc_empty(netdev)) {
1387 /* Reject all multicast */
1388 memset(&crq, 0, sizeof(crq));
1389 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1390 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1391 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1392 ibmvnic_send_crq(adapter, &crq);
1394 /* Accept one or more multicast(s) */
1395 netdev_for_each_mc_addr(ha, netdev) {
1396 memset(&crq, 0, sizeof(crq));
1397 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1398 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1399 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1400 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1402 ibmvnic_send_crq(adapter, &crq);
1408 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1410 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1411 struct sockaddr *addr = p;
1412 union ibmvnic_crq crq;
1414 if (!is_valid_ether_addr(addr->sa_data))
1415 return -EADDRNOTAVAIL;
1417 memset(&crq, 0, sizeof(crq));
1418 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1419 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1420 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1421 ibmvnic_send_crq(adapter, &crq);
1422 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1427 * do_reset returns zero if we are able to keep processing reset events, or
1428 * non-zero if we hit a fatal error and must halt.
1430 static int do_reset(struct ibmvnic_adapter *adapter,
1431 struct ibmvnic_rwi *rwi, u32 reset_state)
1433 struct net_device *netdev = adapter->netdev;
1436 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1439 netif_carrier_off(netdev);
1440 adapter->reset_reason = rwi->reset_reason;
1442 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1443 rc = ibmvnic_reenable_crq_queue(adapter);
1448 rc = __ibmvnic_close(netdev);
1452 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1453 /* remove the closed state so when we call open it appears
1454 * we are coming from the probed state.
1456 adapter->state = VNIC_PROBED;
1458 rc = ibmvnic_init(adapter);
1462 /* If the adapter was in PROBE state prior to the reset,
1463  * exit here.
1464  */
1465 if (reset_state == VNIC_PROBED)
1466 return 0;
1468 rc = ibmvnic_login(netdev);
1469 if (rc) {
1470 adapter->state = reset_state;
1474 rc = reset_tx_pools(adapter);
1478 rc = reset_rx_pools(adapter);
1482 if (reset_state == VNIC_CLOSED)
1483 return 0;
1486 rc = __ibmvnic_open(netdev);
1487 if (rc) {
1488 if (list_empty(&adapter->rwi_list))
1489 adapter->state = VNIC_CLOSED;
1490 else
1491 adapter->state = reset_state;
1492 return 0;
1493 }
1496 /* refresh device's multicast list */
1497 ibmvnic_set_multi(netdev);
1499 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1500 netdev_notify_peers(netdev);
1502 netif_carrier_on(netdev);
1507 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1509 struct ibmvnic_rwi *rwi;
1511 mutex_lock(&adapter->rwi_lock);
1513 if (!list_empty(&adapter->rwi_list)) {
1514 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1515 list);
1516 list_del(&rwi->list);
1517 } else {
1518 rwi = NULL;
1519 }
1521 mutex_unlock(&adapter->rwi_lock);
1522 return rwi;
1525 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1527 struct ibmvnic_rwi *rwi;
1529 rwi = get_next_rwi(adapter);
1530 while (rwi) {
1531 kfree(rwi);
1532 rwi = get_next_rwi(adapter);
1533 }
1536 static void __ibmvnic_reset(struct work_struct *work)
1538 struct ibmvnic_rwi *rwi;
1539 struct ibmvnic_adapter *adapter;
1540 struct net_device *netdev;
1544 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1545 netdev = adapter->netdev;
1547 mutex_lock(&adapter->reset_lock);
1548 adapter->resetting = true;
1549 reset_state = adapter->state;
1551 rwi = get_next_rwi(adapter);
1552 while (rwi) {
1553 rc = do_reset(adapter, rwi, reset_state);
1554 kfree(rwi);
1555 if (rc)
1556 break;
1558 rwi = get_next_rwi(adapter);
1559 }
1562 netdev_dbg(adapter->netdev, "Reset failed\n");
1563 free_all_rwi(adapter);
1564 mutex_unlock(&adapter->reset_lock);
1568 adapter->resetting = false;
1569 mutex_unlock(&adapter->reset_lock);
1572 static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
1573 enum ibmvnic_reset_reason reason)
1575 struct ibmvnic_rwi *rwi, *tmp;
1576 struct net_device *netdev = adapter->netdev;
1577 struct list_head *entry;
1579 if (adapter->state == VNIC_REMOVING ||
1580 adapter->state == VNIC_REMOVED) {
1581 netdev_dbg(netdev, "Adapter removing, skipping reset\n");
1585 if (adapter->state == VNIC_PROBING) {
1586 netdev_warn(netdev, "Adapter reset during probe\n");
1587 adapter->init_done_rc = EAGAIN;
1591 mutex_lock(&adapter->rwi_lock);
1593 list_for_each(entry, &adapter->rwi_list) {
1594 tmp = list_entry(entry, struct ibmvnic_rwi, list);
1595 if (tmp->reset_reason == reason) {
1596 netdev_dbg(netdev, "Skipping matching reset\n");
1597 mutex_unlock(&adapter->rwi_lock);
1598 return;
1602 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
1603 if (!rwi) {
1604 mutex_unlock(&adapter->rwi_lock);
1605 ibmvnic_close(netdev);
1606 return;
1607 }
1609 rwi->reset_reason = reason;
1610 list_add_tail(&rwi->list, &adapter->rwi_list);
1611 mutex_unlock(&adapter->rwi_lock);
1613 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
1614 schedule_work(&adapter->ibmvnic_reset);
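/* Editor's note: resets are coalesced by reason; two back to back FAILOVER
 * events queue a single rwi, while a FAILOVER followed by a TIMEOUT queues
 * two, which __ibmvnic_reset() then processes in order.
 */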
1617 static void ibmvnic_tx_timeout(struct net_device *dev)
1619 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1621 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
1624 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1625 struct ibmvnic_rx_buff *rx_buff)
1627 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1629 rx_buff->skb = NULL;
1631 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1632 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1634 atomic_dec(&pool->available);
1637 static int ibmvnic_poll(struct napi_struct *napi, int budget)
1639 struct net_device *netdev = napi->dev;
1640 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1641 int scrq_num = (int)(napi - adapter->napi);
1642 int frames_processed = 0;
1645 while (frames_processed < budget) {
1646 struct sk_buff *skb;
1647 struct ibmvnic_rx_buff *rx_buff;
1648 union sub_crq *next;
1653 if (unlikely(adapter->resetting)) {
1654 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1655 napi_complete_done(napi, frames_processed);
1656 return frames_processed;
1659 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1660 break;
1661 /* The queue entry at the current index is peeked at above
1662 * to determine that there is a valid descriptor awaiting
1663 * processing. We want to be sure that the current slot
1664 * holds a valid descriptor before reading its contents.
1665 */
1666 dma_rmb();
1667 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1668 rx_buff = (struct ibmvnic_rx_buff *)
1669 be64_to_cpu(next->rx_comp.correlator);
1671 /* do error checking */
1672 if (next->rx_comp.rc) {
1673 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
1674 be16_to_cpu(next->rx_comp.rc));
1675 /* free the entry */
1676 next->rx_comp.first = 0;
1677 dev_kfree_skb_any(rx_buff->skb);
1678 remove_buff_from_pool(adapter, rx_buff);
1680 } else if (!rx_buff->skb) {
1681 /* free the entry */
1682 next->rx_comp.first = 0;
1683 remove_buff_from_pool(adapter, rx_buff);
1687 length = be32_to_cpu(next->rx_comp.len);
1688 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1689 flags = next->rx_comp.flags;
1690 skb = rx_buff->skb;
1691 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1692 length);
1694 /* VLAN Header has been stripped by the system firmware and
1695 * needs to be inserted by the driver
1697 if (adapter->rx_vlan_header_insertion &&
1698 (flags & IBMVNIC_VLAN_STRIPPED))
1699 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1700 ntohs(next->rx_comp.vlan_tci));
1702 /* free the entry */
1703 next->rx_comp.first = 0;
1704 remove_buff_from_pool(adapter, rx_buff);
1706 skb_put(skb, length);
1707 skb->protocol = eth_type_trans(skb, netdev);
1708 skb_record_rx_queue(skb, scrq_num);
1710 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1711 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1712 skb->ip_summed = CHECKSUM_UNNECESSARY;
1716 napi_gro_receive(napi, skb); /* send it up */
1717 netdev->stats.rx_packets++;
1718 netdev->stats.rx_bytes += length;
1719 adapter->rx_stats_buffers[scrq_num].packets++;
1720 adapter->rx_stats_buffers[scrq_num].bytes += length;
1724 if (adapter->state != VNIC_CLOSING)
1725 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
1727 if (frames_processed < budget) {
1728 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1729 napi_complete_done(napi, frames_processed);
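/* Editor's note: re-check for frames that raced with the interrupt
 * re-enable above; if any arrived, disable the interrupt again and
 * drop straight back into polling.
 */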
1730 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1731 napi_reschedule(napi)) {
1732 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1736 return frames_processed;
1739 #ifdef CONFIG_NET_POLL_CONTROLLER
1740 static void ibmvnic_netpoll_controller(struct net_device *dev)
1742 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1745 replenish_pools(netdev_priv(dev));
1746 for (i = 0; i < adapter->req_rx_queues; i++)
1747 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1748 adapter->rx_scrq[i]);
1752 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
1757 static const struct net_device_ops ibmvnic_netdev_ops = {
1758 .ndo_open = ibmvnic_open,
1759 .ndo_stop = ibmvnic_close,
1760 .ndo_start_xmit = ibmvnic_xmit,
1761 .ndo_set_rx_mode = ibmvnic_set_multi,
1762 .ndo_set_mac_address = ibmvnic_set_mac,
1763 .ndo_validate_addr = eth_validate_addr,
1764 .ndo_tx_timeout = ibmvnic_tx_timeout,
1765 #ifdef CONFIG_NET_POLL_CONTROLLER
1766 .ndo_poll_controller = ibmvnic_netpoll_controller,
1768 .ndo_change_mtu = ibmvnic_change_mtu,
1771 /* ethtool functions */
1773 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1774 struct ethtool_link_ksettings *cmd)
1776 u32 supported, advertising;
1778 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1779 SUPPORTED_FIBRE);
1780 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1781 ADVERTISED_FIBRE);
1782 cmd->base.speed = SPEED_1000;
1783 cmd->base.duplex = DUPLEX_FULL;
1784 cmd->base.port = PORT_FIBRE;
1785 cmd->base.phy_address = 0;
1786 cmd->base.autoneg = AUTONEG_ENABLE;
1788 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1789 supported);
1790 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1791 advertising);
1796 static void ibmvnic_get_drvinfo(struct net_device *dev,
1797 struct ethtool_drvinfo *info)
1799 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1800 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1803 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1805 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1807 return adapter->msg_enable;
1810 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1812 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1814 adapter->msg_enable = data;
1817 static u32 ibmvnic_get_link(struct net_device *netdev)
1819 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1821 /* Don't need to send a query because we request a logical link up at
1822 * init and then we wait for link state indications
1824 return adapter->logical_link_state;
1827 static void ibmvnic_get_ringparam(struct net_device *netdev,
1828 struct ethtool_ringparam *ring)
1830 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1832 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
1833 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
1834 ring->rx_mini_max_pending = 0;
1835 ring->rx_jumbo_max_pending = 0;
1836 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
1837 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
1838 ring->rx_mini_pending = 0;
1839 ring->rx_jumbo_pending = 0;
1842 static void ibmvnic_get_channels(struct net_device *netdev,
1843 struct ethtool_channels *channels)
1845 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1847 channels->max_rx = adapter->max_rx_queues;
1848 channels->max_tx = adapter->max_tx_queues;
1849 channels->max_other = 0;
1850 channels->max_combined = 0;
1851 channels->rx_count = adapter->req_rx_queues;
1852 channels->tx_count = adapter->req_tx_queues;
1853 channels->other_count = 0;
1854 channels->combined_count = 0;
1857 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1859 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1862 if (stringset != ETH_SS_STATS)
1863 return;
1865 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1866 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1868 for (i = 0; i < adapter->req_tx_queues; i++) {
1869 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
1870 data += ETH_GSTRING_LEN;
1872 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
1873 data += ETH_GSTRING_LEN;
1875 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
1876 data += ETH_GSTRING_LEN;
1879 for (i = 0; i < adapter->req_rx_queues; i++) {
1880 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
1881 data += ETH_GSTRING_LEN;
1883 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
1884 data += ETH_GSTRING_LEN;
1886 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
1887 data += ETH_GSTRING_LEN;
1891 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1893 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1897 return ARRAY_SIZE(ibmvnic_stats) +
1898 adapter->req_tx_queues * NUM_TX_STATS +
1899 adapter->req_rx_queues * NUM_RX_STATS;
1905 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1906 struct ethtool_stats *stats, u64 *data)
1908 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1909 union ibmvnic_crq crq;
1912 memset(&crq, 0, sizeof(crq));
1913 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1914 crq.request_statistics.cmd = REQUEST_STATISTICS;
1915 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1916 crq.request_statistics.len =
1917 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1919 /* Wait for data to be written */
1920 init_completion(&adapter->stats_done);
1921 ibmvnic_send_crq(adapter, &crq);
1922 wait_for_completion(&adapter->stats_done);
1924 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1925 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
1926 ibmvnic_stats[i].offset));
1928 for (j = 0; j < adapter->req_tx_queues; j++) {
1929 data[i] = adapter->tx_stats_buffers[j].packets;
1930 i++;
1931 data[i] = adapter->tx_stats_buffers[j].bytes;
1932 i++;
1933 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
1934 i++;
1937 for (j = 0; j < adapter->req_rx_queues; j++) {
1938 data[i] = adapter->rx_stats_buffers[j].packets;
1939 i++;
1940 data[i] = adapter->rx_stats_buffers[j].bytes;
1941 i++;
1942 data[i] = adapter->rx_stats_buffers[j].interrupts;
1943 i++;
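/* Editor's note: the resulting data[] layout mirrors ibmvnic_get_strings():
 * ARRAY_SIZE(ibmvnic_stats) global counters, then packets/bytes/dropped
 * per tx queue, then packets/bytes/interrupts per rx queue.
 */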
1947 static const struct ethtool_ops ibmvnic_ethtool_ops = {
1948 .get_drvinfo = ibmvnic_get_drvinfo,
1949 .get_msglevel = ibmvnic_get_msglevel,
1950 .set_msglevel = ibmvnic_set_msglevel,
1951 .get_link = ibmvnic_get_link,
1952 .get_ringparam = ibmvnic_get_ringparam,
1953 .get_channels = ibmvnic_get_channels,
1954 .get_strings = ibmvnic_get_strings,
1955 .get_sset_count = ibmvnic_get_sset_count,
1956 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
1957 .get_link_ksettings = ibmvnic_get_link_ksettings,
1960 /* Routines for managing CRQs/sCRQs */
1962 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
1963 struct ibmvnic_sub_crq_queue *scrq)
1968 free_irq(scrq->irq, scrq);
1969 irq_dispose_mapping(scrq->irq);
1973 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1974 atomic_set(&scrq->used, 0);
1977 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1978 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1982 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
1986 if (!adapter->tx_scrq || !adapter->rx_scrq)
1987 return -EINVAL;
1989 for (i = 0; i < adapter->req_tx_queues; i++) {
1990 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
1991 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
1996 for (i = 0; i < adapter->req_rx_queues; i++) {
1997 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
1998 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2006 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2007 struct ibmvnic_sub_crq_queue *scrq)
2009 struct device *dev = &adapter->vdev->dev;
2012 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2014 /* Close the sub-crqs */
2016 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2017 adapter->vdev->unit_address,
2018 scrq->crq_num);
2019 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2022 netdev_err(adapter->netdev,
2023 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2027 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2028 DMA_BIDIRECTIONAL);
2029 free_pages((unsigned long)scrq->msgs, 2);
2033 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2036 struct device *dev = &adapter->vdev->dev;
2037 struct ibmvnic_sub_crq_queue *scrq;
2040 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2041 if (!scrq)
2042 return NULL;
2044 scrq->msgs =
2045 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2047 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2048 goto zero_page_failed;
2051 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2052 DMA_BIDIRECTIONAL);
2053 if (dma_mapping_error(dev, scrq->msg_token)) {
2054 dev_warn(dev, "Couldn't map crq queue messages page\n");
2058 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2059 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2061 if (rc == H_RESOURCE)
2062 rc = ibmvnic_reset_crq(adapter);
2064 if (rc == H_CLOSED) {
2065 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2066 } else if (rc) {
2067 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2071 scrq->adapter = adapter;
2072 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2073 spin_lock_init(&scrq->lock);
2075 netdev_dbg(adapter->netdev,
2076 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2077 scrq->crq_num, scrq->hw_irq, scrq->irq);
2082 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2083 DMA_BIDIRECTIONAL);
2085 free_pages((unsigned long)scrq->msgs, 2);
2092 static void release_sub_crqs(struct ibmvnic_adapter *adapter)
2096 if (adapter->tx_scrq) {
2097 for (i = 0; i < adapter->req_tx_queues; i++) {
2098 if (!adapter->tx_scrq[i])
2099 continue;
2101 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2103 if (adapter->tx_scrq[i]->irq) {
2104 free_irq(adapter->tx_scrq[i]->irq,
2105 adapter->tx_scrq[i]);
2106 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2107 adapter->tx_scrq[i]->irq = 0;
2110 release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2113 kfree(adapter->tx_scrq);
2114 adapter->tx_scrq = NULL;
2117 if (adapter->rx_scrq) {
2118 for (i = 0; i < adapter->req_rx_queues; i++) {
2119 if (!adapter->rx_scrq[i])
2120 continue;
2122 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2124 if (adapter->rx_scrq[i]->irq) {
2125 free_irq(adapter->rx_scrq[i]->irq,
2126 adapter->rx_scrq[i]);
2127 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2128 adapter->rx_scrq[i]->irq = 0;
2131 release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2134 kfree(adapter->rx_scrq);
2135 adapter->rx_scrq = NULL;
2139 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2140 struct ibmvnic_sub_crq_queue *scrq)
2142 struct device *dev = &adapter->vdev->dev;
2145 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2146 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2147 if (rc)
2148 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2149 scrq->hw_irq, rc);
2153 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2154 struct ibmvnic_sub_crq_queue *scrq)
2156 struct device *dev = &adapter->vdev->dev;
2159 if (scrq->hw_irq > 0x100000000ULL) {
2160 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2161 return 1;
2162 }
2164 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2165 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2166 if (rc)
2167 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2168 scrq->hw_irq, rc);
2172 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2173 struct ibmvnic_sub_crq_queue *scrq)
2175 struct device *dev = &adapter->vdev->dev;
2176 struct ibmvnic_tx_buff *txbuff;
2177 union sub_crq *next;
2183 while (pending_scrq(adapter, scrq)) {
2184 unsigned int pool = scrq->pool_index;
2186 /* The queue entry at the current index is peeked at above
2187 * to determine that there is a valid descriptor awaiting
2188 * processing. We want to be sure that the current slot
2189 * holds a valid descriptor before reading its contents.
2190 */
2191 dma_rmb();
2193 next = ibmvnic_next_scrq(adapter, scrq);
2194 for (i = 0; i < next->tx_comp.num_comps; i++) {
2195 if (next->tx_comp.rcs[i])
2196 dev_err(dev, "tx error %x\n",
2197 next->tx_comp.rcs[i]);
2198 index = be32_to_cpu(next->tx_comp.correlators[i]);
2199 txbuff = &adapter->tx_pool[pool].tx_buff[index];
2201 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2202 if (!txbuff->data_dma[j])
2203 continue;
2205 txbuff->data_dma[j] = 0;
2207 /* if sub_crq was sent indirectly */
2208 first = txbuff->indir_arr[0].generic.first;
2209 if (first == IBMVNIC_CRQ_CMD) {
2210 dma_unmap_single(dev, txbuff->indir_dma,
2211 sizeof(txbuff->indir_arr),
2215 if (txbuff->last_frag) {
2216 dev_kfree_skb_any(txbuff->skb);
2220 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
2221 producer_index] = index;
2222 adapter->tx_pool[pool].producer_index =
2223 (adapter->tx_pool[pool].producer_index + 1) %
2224 adapter->req_tx_entries_per_subcrq;
2226 /* remove tx_comp scrq*/
2227 next->tx_comp.first = 0;
2229 if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
2230 (adapter->req_tx_entries_per_subcrq / 2) &&
2231 __netif_subqueue_stopped(adapter->netdev,
2232 scrq->pool_index)) {
2233 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2234 netdev_info(adapter->netdev, "Started queue %d\n",
2239 enable_scrq_irq(adapter, scrq);
2241 if (pending_scrq(adapter, scrq)) {
2242 disable_scrq_irq(adapter, scrq);
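/* TX sub-CRQ interrupt: mask the source and reap completions directly
 * in hard-IRQ context; ibmvnic_complete_tx() unmasks it again once
 * the ring has been drained.
 */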
2249 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2251 struct ibmvnic_sub_crq_queue *scrq = instance;
2252 struct ibmvnic_adapter *adapter = scrq->adapter;
2254 disable_scrq_irq(adapter, scrq);
2255 ibmvnic_complete_tx(adapter, scrq);
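/* RX sub-CRQ interrupt: count the event and defer the work to NAPI,
 * leaving the source masked while the poll handler runs.
 */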
2260 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2262 struct ibmvnic_sub_crq_queue *scrq = instance;
2263 struct ibmvnic_adapter *adapter = scrq->adapter;
2265 /* When booting a kdump kernel we can hit pending interrupts
2266 * prior to completing driver initialization.
2268 if (unlikely(adapter->state != VNIC_OPEN))
2271 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2273 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2274 disable_scrq_irq(adapter, scrq);
2275 __napi_schedule(&adapter->napi[scrq->scrq_num]);
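/* init_sub_crq_irqs() - create an IRQ mapping and register a handler
 * for every TX and RX sub-CRQ. The error paths unwind only the IRQs
 * that were actually registered before releasing all sub-CRQs.
 */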
2281 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2283 struct device *dev = &adapter->vdev->dev;
2284 struct ibmvnic_sub_crq_queue *scrq;
2288 for (i = 0; i < adapter->req_tx_queues; i++) {
2289 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2291 scrq = adapter->tx_scrq[i];
2292 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2296 dev_err(dev, "Error mapping irq\n");
2297 goto req_tx_irq_failed;
2300 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2301 0, "ibmvnic_tx", scrq);
2304 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2306 irq_dispose_mapping(scrq->irq);
2307 goto req_tx_irq_failed;
2311 for (i = 0; i < adapter->req_rx_queues; i++) {
2312 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2314 scrq = adapter->rx_scrq[i];
2315 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2318 dev_err(dev, "Error mapping irq\n");
2319 goto req_rx_irq_failed;
2321 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2322 0, "ibmvnic_rx", scrq);
2324 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2326 irq_dispose_mapping(scrq->irq);
2327 goto req_rx_irq_failed;
2333 for (j = 0; j < i; j++) {
2334 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2335 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2337 i = adapter->req_tx_queues;
2339 for (j = 0; j < i; j++) {
2340 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2341 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2343 release_sub_crqs(adapter);
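/* init_sub_crqs() - allocate the requested TX and RX sub-CRQs.
 *
 * All queues are first allocated from a single pool; if fewer than
 * requested (but at least the negotiated minimums) could be set up,
 * the TX/RX requests are scaled back to match. The pool is then split
 * into the adapter's tx_scrq and rx_scrq arrays.
 */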
2347 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2349 struct device *dev = &adapter->vdev->dev;
2350 struct ibmvnic_sub_crq_queue **allqueues;
2351 int registered_queues = 0;
2356 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2358 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2362 for (i = 0; i < total_queues; i++) {
2363 allqueues[i] = init_sub_crq_queue(adapter);
2364 if (!allqueues[i]) {
2365 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2368 registered_queues++;
2371 /* Make sure we were able to register the minimum number of queues */
2372 if (registered_queues <
2373 adapter->min_tx_queues + adapter->min_rx_queues) {
2374 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2378 /* Distribute the shortfall across the requested TX/RX queue counts */
2379 for (i = 0; i < total_queues - registered_queues + more; i++) {
2380 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2383 if (adapter->req_rx_queues > adapter->min_rx_queues)
2384 adapter->req_rx_queues--;
2389 if (adapter->req_tx_queues > adapter->min_tx_queues)
2390 adapter->req_tx_queues--;
2397 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2398 sizeof(*adapter->tx_scrq), GFP_KERNEL);
2399 if (!adapter->tx_scrq)
2402 for (i = 0; i < adapter->req_tx_queues; i++) {
2403 adapter->tx_scrq[i] = allqueues[i];
2404 adapter->tx_scrq[i]->pool_index = i;
2407 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
2408 sizeof(*adapter->rx_scrq), GFP_KERNEL);
2409 if (!adapter->rx_scrq)
2412 for (i = 0; i < adapter->req_rx_queues; i++) {
2413 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2414 adapter->rx_scrq[i]->scrq_num = i;
2421 kfree(adapter->tx_scrq);
2422 adapter->tx_scrq = NULL;
2424 for (i = 0; i < registered_queues; i++)
2425 release_sub_crq_queue(adapter, allqueues[i]);
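/* ibmvnic_send_req_caps() - send REQUEST_CAPABILITY CRQs for the queue
 * counts, per-queue ring sizes, MTU, and promiscuous setting we want,
 * clamping ring sizes to what fits in the four-page sub-CRQ buffer.
 * Each request bumps running_cap_crqs so the responses can be counted
 * back in.
 */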
2430 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2432 struct device *dev = &adapter->vdev->dev;
2433 union ibmvnic_crq crq;
2436 /* Sub-CRQ entries are 32 bytes long */
2437 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2439 if (adapter->min_tx_entries_per_subcrq > entries_page ||
2440 adapter->min_rx_add_entries_per_subcrq > entries_page) {
2441 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2445 /* Get the minimum between the queried max and the entries
2446 * that fit in the four-page sub-CRQ buffer
2448 adapter->req_tx_entries_per_subcrq =
2449 adapter->max_tx_entries_per_subcrq > entries_page ?
2450 entries_page : adapter->max_tx_entries_per_subcrq;
2451 adapter->req_rx_add_entries_per_subcrq =
2452 adapter->max_rx_add_entries_per_subcrq > entries_page ?
2453 entries_page : adapter->max_rx_add_entries_per_subcrq;
2455 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
2456 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
2457 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
2459 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
2462 memset(&crq, 0, sizeof(crq));
2463 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2464 crq.request_capability.cmd = REQUEST_CAPABILITY;
2466 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
2467 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
2468 atomic_inc(&adapter->running_cap_crqs);
2469 ibmvnic_send_crq(adapter, &crq);
2471 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
2472 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
2473 atomic_inc(&adapter->running_cap_crqs);
2474 ibmvnic_send_crq(adapter, &crq);
2476 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
2477 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
2478 atomic_inc(&adapter->running_cap_crqs);
2479 ibmvnic_send_crq(adapter, &crq);
2481 crq.request_capability.capability =
2482 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
2483 crq.request_capability.number =
2484 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
2485 atomic_inc(&adapter->running_cap_crqs);
2486 ibmvnic_send_crq(adapter, &crq);
2488 crq.request_capability.capability =
2489 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
2490 crq.request_capability.number =
2491 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
2492 atomic_inc(&adapter->running_cap_crqs);
2493 ibmvnic_send_crq(adapter, &crq);
2495 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
2496 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
2497 atomic_inc(&adapter->running_cap_crqs);
2498 ibmvnic_send_crq(adapter, &crq);
2500 if (adapter->netdev->flags & IFF_PROMISC) {
2501 if (adapter->promisc_supported) {
2502 crq.request_capability.capability =
2503 cpu_to_be16(PROMISC_REQUESTED);
2504 crq.request_capability.number = cpu_to_be64(1);
2505 atomic_inc(&adapter->running_cap_crqs);
2506 ibmvnic_send_crq(adapter, &crq);
2509 crq.request_capability.capability =
2510 cpu_to_be16(PROMISC_REQUESTED);
2511 crq.request_capability.number = cpu_to_be64(0);
2512 atomic_inc(&adapter->running_cap_crqs);
2513 ibmvnic_send_crq(adapter, &crq);
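/* A sub-CRQ slot belongs to the driver once the server sets
 * IBMVNIC_CRQ_CMD_RSP in its first byte: pending_scrq() peeks at the
 * current slot, while ibmvnic_next_scrq() consumes it and advances
 * (wrapping) the cursor under the queue lock.
 */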
2517 static int pending_scrq(struct ibmvnic_adapter *adapter,
2518 struct ibmvnic_sub_crq_queue *scrq)
2520 union sub_crq *entry = &scrq->msgs[scrq->cur];
2522 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
2528 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
2529 struct ibmvnic_sub_crq_queue *scrq)
2531 union sub_crq *entry;
2532 unsigned long flags;
2534 spin_lock_irqsave(&scrq->lock, flags);
2535 entry = &scrq->msgs[scrq->cur];
2536 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2537 if (++scrq->cur == scrq->size)
2542 spin_unlock_irqrestore(&scrq->lock, flags);
2544 /* Ensure that the entire buffer descriptor has been
2545 * loaded before reading its contents
2552 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
2554 struct ibmvnic_crq_queue *queue = &adapter->crq;
2555 union ibmvnic_crq *crq;
2557 crq = &queue->msgs[queue->cur];
2558 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2559 if (++queue->cur == queue->size)
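/* send_subcrq()/send_subcrq_indirect() - pass one 32-byte sub-CRQ
 * descriptor, or an indirect array of descriptors at the given ioba,
 * to the hypervisor for the queue named by remote_handle.
 */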
2568 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
2569 union sub_crq *sub_crq)
2571 unsigned int ua = adapter->vdev->unit_address;
2572 struct device *dev = &adapter->vdev->dev;
2573 u64 *u64_crq = (u64 *)sub_crq;
2576 netdev_dbg(adapter->netdev,
2577 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
2578 (unsigned long int)cpu_to_be64(remote_handle),
2579 (unsigned long int)cpu_to_be64(u64_crq[0]),
2580 (unsigned long int)cpu_to_be64(u64_crq[1]),
2581 (unsigned long int)cpu_to_be64(u64_crq[2]),
2582 (unsigned long int)cpu_to_be64(u64_crq[3]));
2584 /* Make sure the hypervisor sees the complete request */
2587 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2588 cpu_to_be64(remote_handle),
2589 cpu_to_be64(u64_crq[0]),
2590 cpu_to_be64(u64_crq[1]),
2591 cpu_to_be64(u64_crq[2]),
2592 cpu_to_be64(u64_crq[3]));
2596 dev_warn(dev, "CRQ Queue closed\n");
2597 dev_err(dev, "Send error (rc=%d)\n", rc);
2603 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
2604 u64 remote_handle, u64 ioba, u64 num_entries)
2606 unsigned int ua = adapter->vdev->unit_address;
2607 struct device *dev = &adapter->vdev->dev;
2610 /* Make sure the hypervisor sees the complete request */
2612 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
2613 cpu_to_be64(remote_handle),
2618 dev_warn(dev, "CRQ Queue closed\n");
2619 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
2625 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
2626 union ibmvnic_crq *crq)
2628 unsigned int ua = adapter->vdev->unit_address;
2629 struct device *dev = &adapter->vdev->dev;
2630 u64 *u64_crq = (u64 *)crq;
2633 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
2634 (unsigned long int)cpu_to_be64(u64_crq[0]),
2635 (unsigned long int)cpu_to_be64(u64_crq[1]));
2637 /* Make sure the hypervisor sees the complete request */
2640 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
2641 cpu_to_be64(u64_crq[0]),
2642 cpu_to_be64(u64_crq[1]));
2646 dev_warn(dev, "CRQ Queue closed\n");
2647 dev_warn(dev, "Send error (rc=%d)\n", rc);
2653 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
2655 union ibmvnic_crq crq;
2657 memset(&crq, 0, sizeof(crq));
2658 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
2659 crq.generic.cmd = IBMVNIC_CRQ_INIT;
2660 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
2662 return ibmvnic_send_crq(adapter, &crq);
2665 static int send_version_xchg(struct ibmvnic_adapter *adapter)
2667 union ibmvnic_crq crq;
2669 memset(&crq, 0, sizeof(crq));
2670 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
2671 crq.version_exchange.cmd = VERSION_EXCHANGE;
2672 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
2674 return ibmvnic_send_crq(adapter, &crq);
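/* send_login() - build and send the LOGIN request.
 *
 * The login buffer carries the negotiated queue counts and the handle
 * of every sub-CRQ; a second DMA-mapped buffer is registered for the
 * server's response. Both are stashed in the adapter so that
 * handle_login_rsp() can validate and unmap them.
 */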
2677 static void send_login(struct ibmvnic_adapter *adapter)
2679 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
2680 struct ibmvnic_login_buffer *login_buffer;
2681 struct device *dev = &adapter->vdev->dev;
2682 dma_addr_t rsp_buffer_token;
2683 dma_addr_t buffer_token;
2684 size_t rsp_buffer_size;
2685 union ibmvnic_crq crq;
2692 sizeof(struct ibmvnic_login_buffer) +
2693 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
2695 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
2697 goto buf_alloc_failed;
2699 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
2701 if (dma_mapping_error(dev, buffer_token)) {
2702 dev_err(dev, "Couldn't map login buffer\n");
2703 goto buf_map_failed;
2706 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
2707 sizeof(u64) * adapter->req_tx_queues +
2708 sizeof(u64) * adapter->req_rx_queues +
2709 sizeof(u64) * adapter->req_rx_queues +
2710 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
2712 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
2713 if (!login_rsp_buffer)
2714 goto buf_rsp_alloc_failed;
2716 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
2717 rsp_buffer_size, DMA_FROM_DEVICE);
2718 if (dma_mapping_error(dev, rsp_buffer_token)) {
2719 dev_err(dev, "Couldn't map login rsp buffer\n");
2720 goto buf_rsp_map_failed;
2723 adapter->login_buf = login_buffer;
2724 adapter->login_buf_token = buffer_token;
2725 adapter->login_buf_sz = buffer_size;
2726 adapter->login_rsp_buf = login_rsp_buffer;
2727 adapter->login_rsp_buf_token = rsp_buffer_token;
2728 adapter->login_rsp_buf_sz = rsp_buffer_size;
2730 login_buffer->len = cpu_to_be32(buffer_size);
2731 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
2732 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
2733 login_buffer->off_txcomp_subcrqs =
2734 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
2735 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
2736 login_buffer->off_rxcomp_subcrqs =
2737 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
2738 sizeof(u64) * adapter->req_tx_queues);
2739 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
2740 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
2742 tx_list_p = (__be64 *)((char *)login_buffer +
2743 sizeof(struct ibmvnic_login_buffer));
2744 rx_list_p = (__be64 *)((char *)login_buffer +
2745 sizeof(struct ibmvnic_login_buffer) +
2746 sizeof(u64) * adapter->req_tx_queues);
2748 for (i = 0; i < adapter->req_tx_queues; i++) {
2749 if (adapter->tx_scrq[i]) {
2750 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
2755 for (i = 0; i < adapter->req_rx_queues; i++) {
2756 if (adapter->rx_scrq[i]) {
2757 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
2762 netdev_dbg(adapter->netdev, "Login Buffer:\n");
2763 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
2764 netdev_dbg(adapter->netdev, "%016lx\n",
2765 ((unsigned long int *)(adapter->login_buf))[i]);
2768 memset(&crq, 0, sizeof(crq));
2769 crq.login.first = IBMVNIC_CRQ_CMD;
2770 crq.login.cmd = LOGIN;
2771 crq.login.ioba = cpu_to_be32(buffer_token);
2772 crq.login.len = cpu_to_be32(buffer_size);
2773 ibmvnic_send_crq(adapter, &crq);
2778 kfree(login_rsp_buffer);
2779 buf_rsp_alloc_failed:
2780 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
2782 kfree(login_buffer);
2787 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2790 union ibmvnic_crq crq;
2792 memset(&crq, 0, sizeof(crq));
2793 crq.request_map.first = IBMVNIC_CRQ_CMD;
2794 crq.request_map.cmd = REQUEST_MAP;
2795 crq.request_map.map_id = map_id;
2796 crq.request_map.ioba = cpu_to_be32(addr);
2797 crq.request_map.len = cpu_to_be32(len);
2798 ibmvnic_send_crq(adapter, &crq);
2801 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2803 union ibmvnic_crq crq;
2805 memset(&crq, 0, sizeof(crq));
2806 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2807 crq.request_unmap.cmd = REQUEST_UNMAP;
2808 crq.request_unmap.map_id = map_id;
2809 ibmvnic_send_crq(adapter, &crq);
2812 static void send_map_query(struct ibmvnic_adapter *adapter)
2814 union ibmvnic_crq crq;
2816 memset(&crq, 0, sizeof(crq));
2817 crq.query_map.first = IBMVNIC_CRQ_CMD;
2818 crq.query_map.cmd = QUERY_MAP;
2819 ibmvnic_send_crq(adapter, &crq);
2822 /* Send a series of CRQs requesting various capabilities of the VNIC server */
2823 static void send_cap_queries(struct ibmvnic_adapter *adapter)
2825 union ibmvnic_crq crq;
2827 atomic_set(&adapter->running_cap_crqs, 0);
2828 memset(&crq, 0, sizeof(crq));
2829 crq.query_capability.first = IBMVNIC_CRQ_CMD;
2830 crq.query_capability.cmd = QUERY_CAPABILITY;
2832 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
2833 atomic_inc(&adapter->running_cap_crqs);
2834 ibmvnic_send_crq(adapter, &crq);
2836 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2837 atomic_inc(&adapter->running_cap_crqs);
2838 ibmvnic_send_crq(adapter, &crq);
2840 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2841 atomic_inc(&adapter->running_cap_crqs);
2842 ibmvnic_send_crq(adapter, &crq);
2844 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2845 atomic_inc(&adapter->running_cap_crqs);
2846 ibmvnic_send_crq(adapter, &crq);
2848 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2849 atomic_inc(&adapter->running_cap_crqs);
2850 ibmvnic_send_crq(adapter, &crq);
2852 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2853 atomic_inc(&adapter->running_cap_crqs);
2854 ibmvnic_send_crq(adapter, &crq);
2856 crq.query_capability.capability =
2857 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2858 atomic_inc(&adapter->running_cap_crqs);
2859 ibmvnic_send_crq(adapter, &crq);
2861 crq.query_capability.capability =
2862 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2863 atomic_inc(&adapter->running_cap_crqs);
2864 ibmvnic_send_crq(adapter, &crq);
2866 crq.query_capability.capability =
2867 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2868 atomic_inc(&adapter->running_cap_crqs);
2869 ibmvnic_send_crq(adapter, &crq);
2871 crq.query_capability.capability =
2872 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2873 atomic_inc(&adapter->running_cap_crqs);
2874 ibmvnic_send_crq(adapter, &crq);
2876 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2877 atomic_inc(&adapter->running_cap_crqs);
2878 ibmvnic_send_crq(adapter, &crq);
2880 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2881 atomic_inc(&adapter->running_cap_crqs);
2882 ibmvnic_send_crq(adapter, &crq);
2884 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2885 atomic_inc(&adapter->running_cap_crqs);
2886 ibmvnic_send_crq(adapter, &crq);
2888 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2889 atomic_inc(&adapter->running_cap_crqs);
2890 ibmvnic_send_crq(adapter, &crq);
2892 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2893 atomic_inc(&adapter->running_cap_crqs);
2894 ibmvnic_send_crq(adapter, &crq);
2896 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2897 atomic_inc(&adapter->running_cap_crqs);
2898 ibmvnic_send_crq(adapter, &crq);
2900 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
2901 atomic_inc(&adapter->running_cap_crqs);
2902 ibmvnic_send_crq(adapter, &crq);
2904 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2905 atomic_inc(&adapter->running_cap_crqs);
2906 ibmvnic_send_crq(adapter, &crq);
2908 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2909 atomic_inc(&adapter->running_cap_crqs);
2910 ibmvnic_send_crq(adapter, &crq);
2912 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2913 atomic_inc(&adapter->running_cap_crqs);
2914 ibmvnic_send_crq(adapter, &crq);
2916 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2917 atomic_inc(&adapter->running_cap_crqs);
2918 ibmvnic_send_crq(adapter, &crq);
2920 crq.query_capability.capability =
2921 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2922 atomic_inc(&adapter->running_cap_crqs);
2923 ibmvnic_send_crq(adapter, &crq);
2925 crq.query_capability.capability =
2926 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2927 atomic_inc(&adapter->running_cap_crqs);
2928 ibmvnic_send_crq(adapter, &crq);
2930 crq.query_capability.capability =
2931 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2932 atomic_inc(&adapter->running_cap_crqs);
2933 ibmvnic_send_crq(adapter, &crq);
2935 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2936 atomic_inc(&adapter->running_cap_crqs);
2937 ibmvnic_send_crq(adapter, &crq);
2940 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2942 struct device *dev = &adapter->vdev->dev;
2943 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2944 union ibmvnic_crq crq;
2947 dma_unmap_single(dev, adapter->ip_offload_tok,
2948 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2950 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2951 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2952 netdev_dbg(adapter->netdev, "%016lx\n",
2953 ((unsigned long int *)(buf))[i]);
2955 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2956 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2957 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2958 buf->tcp_ipv4_chksum);
2959 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2960 buf->tcp_ipv6_chksum);
2961 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2962 buf->udp_ipv4_chksum);
2963 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2964 buf->udp_ipv6_chksum);
2965 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2966 buf->large_tx_ipv4);
2967 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2968 buf->large_tx_ipv6);
2969 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2970 buf->large_rx_ipv4);
2971 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2972 buf->large_rx_ipv6);
2973 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2974 buf->max_ipv4_header_size);
2975 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2976 buf->max_ipv6_header_size);
2977 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2978 buf->max_tcp_header_size);
2979 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2980 buf->max_udp_header_size);
2981 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2982 buf->max_large_tx_size);
2983 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2984 buf->max_large_rx_size);
2985 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2986 buf->ipv6_extension_header);
2987 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2988 buf->tcp_pseudosum_req);
2989 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2990 buf->num_ipv6_ext_headers);
2991 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2992 buf->off_ipv6_ext_headers);
2994 adapter->ip_offload_ctrl_tok =
2995 dma_map_single(dev, &adapter->ip_offload_ctrl,
2996 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2998 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2999 dev_err(dev, "Couldn't map ip offload control buffer\n");
3003 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3004 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3005 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3006 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3007 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3009 /* large_tx/rx disabled for now, additional features needed */
3010 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
3011 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
3012 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3013 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3015 adapter->netdev->features = NETIF_F_GSO;
3017 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3018 adapter->netdev->features |= NETIF_F_IP_CSUM;
3020 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3021 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3023 if ((adapter->netdev->features &
3024 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3025 adapter->netdev->features |= NETIF_F_RXCSUM;
3027 memset(&crq, 0, sizeof(crq));
3028 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3029 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3030 crq.control_ip_offload.len =
3031 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3032 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3033 ibmvnic_send_crq(adapter, &crq);
3036 static void handle_error_info_rsp(union ibmvnic_crq *crq,
3037 struct ibmvnic_adapter *adapter)
3039 struct device *dev = &adapter->vdev->dev;
3040 struct ibmvnic_error_buff *error_buff, *tmp;
3041 unsigned long flags;
3045 if (crq->request_error_rsp.rc.code) {
3046 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
3047 crq->request_error_rsp.rc.code);
3051 spin_lock_irqsave(&adapter->error_list_lock, flags);
3052 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
3053 if (error_buff->error_id == crq->request_error_rsp.error_id) {
3055 list_del(&error_buff->list);
3058 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3061 dev_err(dev, "Couldn't find error id %x\n",
3062 be32_to_cpu(crq->request_error_rsp.error_id));
3066 dev_err(dev, "Detailed info for error id %x:",
3067 be32_to_cpu(crq->request_error_rsp.error_id));
3069 for (i = 0; i < error_buff->len; i++) {
3070 pr_cont("%02x", (int)error_buff->buff[i]);
3076 dma_unmap_single(dev, error_buff->dma, error_buff->len,
3078 kfree(error_buff->buff);
3082 static void request_error_information(struct ibmvnic_adapter *adapter,
3083 union ibmvnic_crq *err_crq)
3085 struct device *dev = &adapter->vdev->dev;
3086 struct net_device *netdev = adapter->netdev;
3087 struct ibmvnic_error_buff *error_buff;
3088 unsigned long timeout = msecs_to_jiffies(30000);
3089 union ibmvnic_crq crq;
3090 unsigned long flags;
3093 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3097 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
3098 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3099 if (!error_buff->buff) {
3104 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3106 if (dma_mapping_error(dev, error_buff->dma)) {
3107 netdev_err(netdev, "Couldn't map error buffer\n");
3108 kfree(error_buff->buff);
3113 error_buff->len = detail_len;
3114 error_buff->error_id = err_crq->error_indication.error_id;
3116 spin_lock_irqsave(&adapter->error_list_lock, flags);
3117 list_add_tail(&error_buff->list, &adapter->errors);
3118 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3120 memset(&crq, 0, sizeof(crq));
3121 crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3122 crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3123 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3124 crq.request_error_info.len = cpu_to_be32(detail_len);
3125 crq.request_error_info.error_id = err_crq->error_indication.error_id;
3127 rc = ibmvnic_send_crq(adapter, &crq);
3129 netdev_err(netdev, "failed to request error information\n");
3133 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3134 netdev_err(netdev, "timeout waiting for error information\n");
3141 spin_lock_irqsave(&adapter->error_list_lock, flags);
3142 list_del(&error_buff->list);
3143 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3145 kfree(error_buff->buff);
3149 static void handle_error_indication(union ibmvnic_crq *crq,
3150 struct ibmvnic_adapter *adapter)
3152 struct device *dev = &adapter->vdev->dev;
3154 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3155 crq->error_indication.flags
3156 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3157 be32_to_cpu(crq->error_indication.error_id),
3158 be16_to_cpu(crq->error_indication.error_cause));
3160 if (be32_to_cpu(crq->error_indication.error_id))
3161 request_error_information(adapter, crq);
3163 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3164 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3166 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3169 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
3170 struct ibmvnic_adapter *adapter)
3172 struct net_device *netdev = adapter->netdev;
3173 struct device *dev = &adapter->vdev->dev;
3176 rc = crq->change_mac_addr_rsp.rc.code;
3178 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3181 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
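/* handle_request_cap_rsp() - process a REQUEST_CAPABILITY response.
 *
 * On PARTIALSUCCESS the server's counter-offer is adopted and the
 * whole set of requests is resent. When the last outstanding response
 * has arrived, the IP offload capabilities are queried next.
 */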
3185 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3186 struct ibmvnic_adapter *adapter)
3188 struct device *dev = &adapter->vdev->dev;
3192 atomic_dec(&adapter->running_cap_crqs);
3193 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3195 req_value = &adapter->req_tx_queues;
3199 req_value = &adapter->req_rx_queues;
3202 case REQ_RX_ADD_QUEUES:
3203 req_value = &adapter->req_rx_add_queues;
3206 case REQ_TX_ENTRIES_PER_SUBCRQ:
3207 req_value = &adapter->req_tx_entries_per_subcrq;
3208 name = "tx_entries_per_subcrq";
3210 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3211 req_value = &adapter->req_rx_add_entries_per_subcrq;
3212 name = "rx_add_entries_per_subcrq";
3215 req_value = &adapter->req_mtu;
3218 case PROMISC_REQUESTED:
3219 req_value = &adapter->promisc;
3223 dev_err(dev, "Got invalid cap request rsp %d\n",
3224 crq->request_capability.capability);
3228 switch (crq->request_capability_rsp.rc.code) {
3231 case PARTIALSUCCESS:
3232 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3234 (long int)be64_to_cpu(crq->request_capability_rsp.
3236 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
3237 ibmvnic_send_req_caps(adapter, 1);
3240 dev_err(dev, "Error %d in request cap rsp\n",
3241 crq->request_capability_rsp.rc.code);
3245 /* Done receiving requested capabilities, query IP offload support */
3246 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3247 union ibmvnic_crq newcrq;
3248 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3249 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3250 &adapter->ip_offload_buf;
3252 adapter->wait_capability = false;
3253 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3257 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3258 if (!firmware_has_feature(FW_FEATURE_CMO))
3259 dev_err(dev, "Couldn't map offload buffer\n");
3263 memset(&newcrq, 0, sizeof(newcrq));
3264 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3265 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3266 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3267 newcrq.query_ip_offload.ioba =
3268 cpu_to_be32(adapter->ip_offload_tok);
3270 ibmvnic_send_crq(adapter, &newcrq);
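/* handle_login_rsp() - validate the server's LOGIN response.
 *
 * A non-zero return code means the requested queues could not all be
 * granted, so the login must be renegotiated with fewer queues; a
 * response whose queue counts contradict the request is fatal.
 */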
3274 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3275 struct ibmvnic_adapter *adapter)
3277 struct device *dev = &adapter->vdev->dev;
3278 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3279 struct ibmvnic_login_buffer *login = adapter->login_buf;
3282 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3284 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3285 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
3287 /* If the number of queues requested can't be allocated by the
3288 * server, the login response will return with code 1. We will need
3289 * to resend the login buffer with fewer queues requested.
3291 if (login_rsp_crq->generic.rc.code) {
3292 adapter->renegotiate = true;
3293 complete(&adapter->init_done);
3297 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3298 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
3299 netdev_dbg(adapter->netdev, "%016lx\n",
3300 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
3304 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
3305 (be32_to_cpu(login->num_rxcomp_subcrqs) *
3306 adapter->req_rx_add_queues !=
3307 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
3308 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
3309 ibmvnic_remove(adapter->vdev);
3312 complete(&adapter->init_done);
3317 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
3318 struct ibmvnic_adapter *adapter)
3320 struct device *dev = &adapter->vdev->dev;
3323 rc = crq->request_unmap_rsp.rc.code;
3325 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
3328 static void handle_query_map_rsp(union ibmvnic_crq *crq,
3329 struct ibmvnic_adapter *adapter)
3331 struct net_device *netdev = adapter->netdev;
3332 struct device *dev = &adapter->vdev->dev;
3335 rc = crq->query_map_rsp.rc.code;
3337 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
3340 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
3341 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
3342 crq->query_map_rsp.free_pages);
3345 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
3346 struct ibmvnic_adapter *adapter)
3348 struct net_device *netdev = adapter->netdev;
3349 struct device *dev = &adapter->vdev->dev;
3352 atomic_dec(&adapter->running_cap_crqs);
3353 netdev_dbg(netdev, "Outstanding queries: %d\n",
3354 atomic_read(&adapter->running_cap_crqs));
3355 rc = crq->query_capability.rc.code;
3357 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
3361 switch (be16_to_cpu(crq->query_capability.capability)) {
3363 adapter->min_tx_queues =
3364 be64_to_cpu(crq->query_capability.number);
3365 netdev_dbg(netdev, "min_tx_queues = %lld\n",
3366 adapter->min_tx_queues);
3369 adapter->min_rx_queues =
3370 be64_to_cpu(crq->query_capability.number);
3371 netdev_dbg(netdev, "min_rx_queues = %lld\n",
3372 adapter->min_rx_queues);
3374 case MIN_RX_ADD_QUEUES:
3375 adapter->min_rx_add_queues =
3376 be64_to_cpu(crq->query_capability.number);
3377 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
3378 adapter->min_rx_add_queues);
3381 adapter->max_tx_queues =
3382 be64_to_cpu(crq->query_capability.number);
3383 netdev_dbg(netdev, "max_tx_queues = %lld\n",
3384 adapter->max_tx_queues);
3387 adapter->max_rx_queues =
3388 be64_to_cpu(crq->query_capability.number);
3389 netdev_dbg(netdev, "max_rx_queues = %lld\n",
3390 adapter->max_rx_queues);
3392 case MAX_RX_ADD_QUEUES:
3393 adapter->max_rx_add_queues =
3394 be64_to_cpu(crq->query_capability.number);
3395 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
3396 adapter->max_rx_add_queues);
3398 case MIN_TX_ENTRIES_PER_SUBCRQ:
3399 adapter->min_tx_entries_per_subcrq =
3400 be64_to_cpu(crq->query_capability.number);
3401 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
3402 adapter->min_tx_entries_per_subcrq);
3404 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
3405 adapter->min_rx_add_entries_per_subcrq =
3406 be64_to_cpu(crq->query_capability.number);
3407 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
3408 adapter->min_rx_add_entries_per_subcrq);
3410 case MAX_TX_ENTRIES_PER_SUBCRQ:
3411 adapter->max_tx_entries_per_subcrq =
3412 be64_to_cpu(crq->query_capability.number);
3413 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
3414 adapter->max_tx_entries_per_subcrq);
3416 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
3417 adapter->max_rx_add_entries_per_subcrq =
3418 be64_to_cpu(crq->query_capability.number);
3419 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
3420 adapter->max_rx_add_entries_per_subcrq);
3422 case TCP_IP_OFFLOAD:
3423 adapter->tcp_ip_offload =
3424 be64_to_cpu(crq->query_capability.number);
3425 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
3426 adapter->tcp_ip_offload);
3428 case PROMISC_SUPPORTED:
3429 adapter->promisc_supported =
3430 be64_to_cpu(crq->query_capability.number);
3431 netdev_dbg(netdev, "promisc_supported = %lld\n",
3432 adapter->promisc_supported);
3435 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
3436 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3437 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
3440 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
3441 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3442 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
3444 case MAX_MULTICAST_FILTERS:
3445 adapter->max_multicast_filters =
3446 be64_to_cpu(crq->query_capability.number);
3447 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
3448 adapter->max_multicast_filters);
3450 case VLAN_HEADER_INSERTION:
3451 adapter->vlan_header_insertion =
3452 be64_to_cpu(crq->query_capability.number);
3453 if (adapter->vlan_header_insertion)
3454 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
3455 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
3456 adapter->vlan_header_insertion);
3458 case RX_VLAN_HEADER_INSERTION:
3459 adapter->rx_vlan_header_insertion =
3460 be64_to_cpu(crq->query_capability.number);
3461 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
3462 adapter->rx_vlan_header_insertion);
3464 case MAX_TX_SG_ENTRIES:
3465 adapter->max_tx_sg_entries =
3466 be64_to_cpu(crq->query_capability.number);
3467 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
3468 adapter->max_tx_sg_entries);
3470 case RX_SG_SUPPORTED:
3471 adapter->rx_sg_supported =
3472 be64_to_cpu(crq->query_capability.number);
3473 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
3474 adapter->rx_sg_supported);
3476 case OPT_TX_COMP_SUB_QUEUES:
3477 adapter->opt_tx_comp_sub_queues =
3478 be64_to_cpu(crq->query_capability.number);
3479 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
3480 adapter->opt_tx_comp_sub_queues);
3482 case OPT_RX_COMP_QUEUES:
3483 adapter->opt_rx_comp_queues =
3484 be64_to_cpu(crq->query_capability.number);
3485 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
3486 adapter->opt_rx_comp_queues);
3488 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
3489 adapter->opt_rx_bufadd_q_per_rx_comp_q =
3490 be64_to_cpu(crq->query_capability.number);
3491 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
3492 adapter->opt_rx_bufadd_q_per_rx_comp_q);
3494 case OPT_TX_ENTRIES_PER_SUBCRQ:
3495 adapter->opt_tx_entries_per_subcrq =
3496 be64_to_cpu(crq->query_capability.number);
3497 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
3498 adapter->opt_tx_entries_per_subcrq);
3500 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
3501 adapter->opt_rxba_entries_per_subcrq =
3502 be64_to_cpu(crq->query_capability.number);
3503 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
3504 adapter->opt_rxba_entries_per_subcrq);
3506 case TX_RX_DESC_REQ:
3507 adapter->tx_rx_desc_req = crq->query_capability.number;
3508 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
3509 adapter->tx_rx_desc_req);
3513 netdev_err(netdev, "Got invalid cap rsp %d\n",
3514 crq->query_capability.capability);
3518 if (atomic_read(&adapter->running_cap_crqs) == 0) {
3519 adapter->wait_capability = false;
3520 ibmvnic_send_req_caps(adapter, 0);
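/* ibmvnic_handle_crq() - dispatch one CRQ message: initialization
 * handshakes, transport events (migration, failover, loss of
 * connection), and command responses each go to their handler.
 */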
3524 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3525 struct ibmvnic_adapter *adapter)
3527 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3528 struct net_device *netdev = adapter->netdev;
3529 struct device *dev = &adapter->vdev->dev;
3530 u64 *u64_crq = (u64 *)crq;
3533 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3534 (unsigned long int)cpu_to_be64(u64_crq[0]),
3535 (unsigned long int)cpu_to_be64(u64_crq[1]));
3536 switch (gen_crq->first) {
3537 case IBMVNIC_CRQ_INIT_RSP:
3538 switch (gen_crq->cmd) {
3539 case IBMVNIC_CRQ_INIT:
3540 dev_info(dev, "Partner initialized\n");
3541 adapter->from_passive_init = true;
3542 complete(&adapter->init_done);
3544 case IBMVNIC_CRQ_INIT_COMPLETE:
3545 dev_info(dev, "Partner initialization complete\n");
3546 send_version_xchg(adapter);
3549 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3552 case IBMVNIC_CRQ_XPORT_EVENT:
3553 netif_carrier_off(netdev);
3554 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3555 dev_info(dev, "Migrated, re-enabling adapter\n");
3556 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
3557 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3558 dev_info(dev, "Backing device failover detected\n");
3559 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
3561 /* The adapter lost the connection */
3562 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3564 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3567 case IBMVNIC_CRQ_CMD_RSP:
3570 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3575 switch (gen_crq->cmd) {
3576 case VERSION_EXCHANGE_RSP:
3577 rc = crq->version_exchange_rsp.rc.code;
3579 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3583 be16_to_cpu(crq->version_exchange_rsp.version);
3584 dev_info(dev, "Partner protocol version is %d\n",
3586 send_cap_queries(adapter);
3588 case QUERY_CAPABILITY_RSP:
3589 handle_query_cap_rsp(crq, adapter);
3592 handle_query_map_rsp(crq, adapter);
3594 case REQUEST_MAP_RSP:
3595 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
3596 complete(&adapter->fw_done);
3598 case REQUEST_UNMAP_RSP:
3599 handle_request_unmap_rsp(crq, adapter);
3601 case REQUEST_CAPABILITY_RSP:
3602 handle_request_cap_rsp(crq, adapter);
3605 netdev_dbg(netdev, "Got Login Response\n");
3606 handle_login_rsp(crq, adapter);
3608 case LOGICAL_LINK_STATE_RSP:
3610 "Got Logical Link State Response, state: %d rc: %d\n",
3611 crq->logical_link_state_rsp.link_state,
3612 crq->logical_link_state_rsp.rc.code);
3613 adapter->logical_link_state =
3614 crq->logical_link_state_rsp.link_state;
3615 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
3616 complete(&adapter->init_done);
3618 case LINK_STATE_INDICATION:
3619 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3620 adapter->phys_link_state =
3621 crq->link_state_indication.phys_link_state;
3622 adapter->logical_link_state =
3623 crq->link_state_indication.logical_link_state;
3625 case CHANGE_MAC_ADDR_RSP:
3626 netdev_dbg(netdev, "Got MAC address change Response\n");
3627 handle_change_mac_rsp(crq, adapter);
3629 case ERROR_INDICATION:
3630 netdev_dbg(netdev, "Got Error Indication\n");
3631 handle_error_indication(crq, adapter);
3633 case REQUEST_ERROR_RSP:
3634 netdev_dbg(netdev, "Got Error Detail Response\n");
3635 handle_error_info_rsp(crq, adapter);
3637 case REQUEST_STATISTICS_RSP:
3638 netdev_dbg(netdev, "Got Statistics Response\n");
3639 complete(&adapter->stats_done);
3641 case QUERY_IP_OFFLOAD_RSP:
3642 netdev_dbg(netdev, "Got Query IP offload Response\n");
3643 handle_query_ip_offload_rsp(adapter);
3645 case MULTICAST_CTRL_RSP:
3646 netdev_dbg(netdev, "Got multicast control Response\n");
3648 case CONTROL_IP_OFFLOAD_RSP:
3649 netdev_dbg(netdev, "Got Control IP offload Response\n");
3650 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3651 sizeof(adapter->ip_offload_ctrl),
3653 complete(&adapter->init_done);
3655 case COLLECT_FW_TRACE_RSP:
3656 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3657 complete(&adapter->fw_done);
3660 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3665 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3667 struct ibmvnic_adapter *adapter = instance;
3669 tasklet_schedule(&adapter->tasklet);
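/* CRQ servicing is split in two: the hard-IRQ handler above only
 * schedules this tasklet, which drains all valid messages under the
 * queue lock and clears each entry once it has been handled.
 */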
3673 static void ibmvnic_tasklet(void *data)
3675 struct ibmvnic_adapter *adapter = data;
3676 struct ibmvnic_crq_queue *queue = &adapter->crq;
3677 union ibmvnic_crq *crq;
3678 unsigned long flags;
3681 spin_lock_irqsave(&queue->lock, flags);
3683 /* Pull all the valid messages off the CRQ */
3684 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3685 /* This barrier makes sure ibmvnic_next_crq()'s
3686 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
3687 * before ibmvnic_handle_crq()'s
3688 * switch(gen_crq->first) and switch(gen_crq->cmd).
3691 ibmvnic_handle_crq(crq, adapter);
3692 crq->generic.first = 0;
3695 /* if capability CRQs were sent in this tasklet, the next
3696 * tasklet run must wait until all responses are received
3698 if (atomic_read(&adapter->running_cap_crqs) != 0)
3699 adapter->wait_capability = true;
3700 spin_unlock_irqrestore(&queue->lock, flags);
3703 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3705 struct vio_dev *vdev = adapter->vdev;
3709 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3710 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3713 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3718 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3720 struct ibmvnic_crq_queue *crq = &adapter->crq;
3721 struct device *dev = &adapter->vdev->dev;
3722 struct vio_dev *vdev = adapter->vdev;
3727 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3728 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3730 /* Clean out the queue */
3734 memset(crq->msgs, 0, PAGE_SIZE);
3737 /* And re-open it again */
3738 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3739 crq->msg_token, PAGE_SIZE);
3742 /* Adapter is good, but other end is not ready */
3743 dev_warn(dev, "Partner adapter not ready\n");
3745 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3750 static void release_crq_queue(struct ibmvnic_adapter *adapter)
3752 struct ibmvnic_crq_queue *crq = &adapter->crq;
3753 struct vio_dev *vdev = adapter->vdev;
3759 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3760 free_irq(vdev->irq, adapter);
3761 tasklet_kill(&adapter->tasklet);
3763 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3764 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3766 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3768 free_page((unsigned long)crq->msgs);
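/* init_crq_queue() - allocate and DMA-map the one-page CRQ and
 * register it with the hypervisor (falling back to a CRQ reset if the
 * resource is busy, e.g. across a kexec), then set up the CRQ
 * interrupt and tasklet and kick processing of any queued messages.
 */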
3772 static int init_crq_queue(struct ibmvnic_adapter *adapter)
3774 struct ibmvnic_crq_queue *crq = &adapter->crq;
3775 struct device *dev = &adapter->vdev->dev;
3776 struct vio_dev *vdev = adapter->vdev;
3777 int rc, retrc = -ENOMEM;
3782 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3783 /* Should we allocate more than one page? */
3788 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3789 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3791 if (dma_mapping_error(dev, crq->msg_token))
3794 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3795 crq->msg_token, PAGE_SIZE);
3797 if (rc == H_RESOURCE)
3798 /* maybe kexecing and resource is busy; try a reset */
3799 rc = ibmvnic_reset_crq(adapter);
3802 if (rc == H_CLOSED) {
3803 dev_warn(dev, "Partner adapter not ready\n");
3805 dev_warn(dev, "Error %d opening adapter\n", rc);
3806 goto reg_crq_failed;
3811 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3812 (unsigned long)adapter);
3814 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3815 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3818 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3820 goto req_irq_failed;
3823 rc = vio_enable_interrupts(vdev);
3825 dev_err(dev, "Error %d enabling interrupts\n", rc);
3826 goto req_irq_failed;
3830 spin_lock_init(&crq->lock);
3832 /* process any CRQs that were queued before we enabled interrupts */
3833 tasklet_schedule(&adapter->tasklet);
3838 tasklet_kill(&adapter->tasklet);
3840 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3841 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3843 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3845 free_page((unsigned long)crq->msgs);
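/* ibmvnic_init() - bring up the CRQ (or merely reset and re-enable it
 * when recovering), run the init handshake with the server, then
 * create the sub-CRQs and their IRQs. A passive init driven by the
 * server moves the adapter straight to the open state.
 */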
3850 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
3852 struct device *dev = &adapter->vdev->dev;
3853 unsigned long timeout = msecs_to_jiffies(30000);
3856 if (adapter->resetting) {
3857 rc = ibmvnic_reset_crq(adapter);
3859 rc = vio_enable_interrupts(adapter->vdev);
3861 rc = init_crq_queue(adapter);
3865 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3869 adapter->from_passive_init = false;
3871 init_completion(&adapter->init_done);
3872 adapter->init_done_rc = 0;
3873 ibmvnic_send_crq_init(adapter);
3874 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3875 dev_err(dev, "Initialization sequence timed out\n");
3879 if (adapter->init_done_rc) {
3880 release_crq_queue(adapter);
3881 return adapter->init_done_rc;
3884 if (adapter->from_passive_init) {
3885 adapter->state = VNIC_OPEN;
3886 adapter->from_passive_init = false;
3890 if (adapter->resetting)
3891 rc = reset_sub_crq_queues(adapter);
3893 rc = init_sub_crqs(adapter);
3895 dev_err(dev, "Initialization of sub crqs failed\n");
3896 release_crq_queue(adapter);
3900 rc = init_sub_crq_irqs(adapter);
3902 dev_err(dev, "Failed to initialize sub crq irqs\n");
3903 release_crq_queue(adapter);
3909 static struct device_attribute dev_attr_failover;
3911 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3913 struct ibmvnic_adapter *adapter;
3914 struct net_device *netdev;
3915 unsigned char *mac_addr_p;
3918 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3921 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3922 VETH_MAC_ADDR, NULL);
3925 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3926 __FILE__, __LINE__);
3930 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3931 IBMVNIC_MAX_TX_QUEUES);
3935 adapter = netdev_priv(netdev);
3936 adapter->state = VNIC_PROBING;
3937 dev_set_drvdata(&dev->dev, netdev);
3938 adapter->vdev = dev;
3939 adapter->netdev = netdev;
3941 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3942 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3943 netdev->irq = dev->irq;
3944 netdev->netdev_ops = &ibmvnic_netdev_ops;
3945 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3946 SET_NETDEV_DEV(netdev, &dev->dev);
3948 spin_lock_init(&adapter->stats_lock);
3950 INIT_LIST_HEAD(&adapter->errors);
3951 spin_lock_init(&adapter->error_list_lock);
3953 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
3954 INIT_LIST_HEAD(&adapter->rwi_list);
3955 mutex_init(&adapter->reset_lock);
3956 mutex_init(&adapter->rwi_lock);
3957 adapter->resetting = false;
3960 rc = ibmvnic_init(adapter);
3961 if (rc && rc != EAGAIN)
3962 goto ibmvnic_init_fail;
3963 } while (rc == EAGAIN);
3965 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3967 rc = device_create_file(&dev->dev, &dev_attr_failover);
3969 goto ibmvnic_init_fail;
3971 netif_carrier_off(netdev);
3972 rc = register_netdev(netdev);
3974 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3975 goto ibmvnic_register_fail;
3977 dev_info(&dev->dev, "ibmvnic registered\n");
3979 adapter->state = VNIC_PROBED;
3982 ibmvnic_register_fail:
3983 device_remove_file(&dev->dev, &dev_attr_failover);
3986 release_sub_crqs(adapter);
3987 release_crq_queue(adapter);
3988 free_netdev(netdev);
3993 static int ibmvnic_remove(struct vio_dev *dev)
3995 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3996 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3998 adapter->state = VNIC_REMOVING;
3999 unregister_netdev(netdev);
4000 mutex_lock(&adapter->reset_lock);
4002 release_resources(adapter);
4003 release_sub_crqs(adapter);
4004 release_crq_queue(adapter);
4006 adapter->state = VNIC_REMOVED;
4008 mutex_unlock(&adapter->reset_lock);
4009 device_remove_file(&dev->dev, &dev_attr_failover);
4010 free_netdev(netdev);
4011 dev_set_drvdata(&dev->dev, NULL);
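/* Writing "1" to the sysfs "failover" attribute fetches the current
 * session token from the hypervisor and reports a session error
 * against it, triggering a client-initiated failover.
 */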
4016 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
4017 const char *buf, size_t count)
4019 struct net_device *netdev = dev_get_drvdata(dev);
4020 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4021 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
4022 __be64 session_token;
4025 if (!sysfs_streq(buf, "1"))
4028 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
4029 H_GET_SESSION_TOKEN, 0, 0, 0);
4031 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
4036 session_token = (__be64)retbuf[0];
4037 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
4038 be64_to_cpu(session_token));
4039 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4040 H_SESSION_ERR_DETECTED, session_token, 0, 0);
4042 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
4050 static DEVICE_ATTR(failover, 0200, NULL, failover_store);
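/* ibmvnic_get_desired_dma() - estimate the IO entitlement the device
 * needs: one page for the CRQ, the statistics buffer, four pages per
 * sub-CRQ, and every RX pool's buffers.
 */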
4052 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
4054 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
4055 struct ibmvnic_adapter *adapter;
4056 struct iommu_table *tbl;
4057 unsigned long ret = 0;
4060 tbl = get_iommu_table_base(&vdev->dev);
4062 /* netdev inits at probe time along with the structures we need below */
4064 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
4066 adapter = netdev_priv(netdev);
4068 ret += PAGE_SIZE; /* the crq message queue */
4069 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
4071 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
4072 ret += 4 * PAGE_SIZE; /* the scrq message queue */
4074 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4076 ret += adapter->rx_pool[i].size *
4077 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
4082 static int ibmvnic_resume(struct device *dev)
4084 struct net_device *netdev = dev_get_drvdata(dev);
4085 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4087 if (adapter->state != VNIC_OPEN)
4090 tasklet_schedule(&adapter->tasklet);
4095 static const struct vio_device_id ibmvnic_device_table[] = {
4096 {"network", "IBM,vnic"},
4099 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
4101 static const struct dev_pm_ops ibmvnic_pm_ops = {
4102 .resume = ibmvnic_resume
4105 static struct vio_driver ibmvnic_driver = {
4106 .id_table = ibmvnic_device_table,
4107 .probe = ibmvnic_probe,
4108 .remove = ibmvnic_remove,
4109 .get_desired_dma = ibmvnic_get_desired_dma,
4110 .name = ibmvnic_driver_name,
4111 .pm = &ibmvnic_pm_ops,
4114 /* module functions */
4115 static int __init ibmvnic_module_init(void)
4117 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4118 IBMVNIC_DRIVER_VERSION);
4120 return vio_register_driver(&ibmvnic_driver);
4123 static void __exit ibmvnic_module_exit(void)
4125 vio_unregister_driver(&ibmvnic_driver);
4128 module_init(ibmvnic_module_init);
4129 module_exit(ibmvnic_module_exit);