2 * FUJITSU Extended Socket Network Device driver
3 * Copyright (c) 2015 FUJITSU LIMITED
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/nls.h>
25 #include <linux/platform_device.h>
26 #include <linux/netdevice.h>
27 #include <linux/interrupt.h>
/* Driver version string assembled from the MAJ/MIN macros (defined above,
 * outside this excerpt).
 */
33 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
34 #define DRV_NAME "fjes"
/* Non-static: presumably referenced from other translation units of the
 * driver (e.g. ethtool support) — confirm against the full source tree. */
35 char fjes_driver_name[] = DRV_NAME;
36 char fjes_driver_version[] = DRV_VERSION;
37 static const char fjes_driver_string[] =
38 "FUJITSU Extended Socket Network Device Driver";
39 static const char fjes_copyright[] =
40 "Copyright (c) 2015 FUJITSU LIMITED";
/* Standard module metadata. */
42 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
43 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_VERSION);
/* Forward declarations for the static handlers defined later in this file:
 * IRQ setup/teardown, net_device_ops callbacks, deferred work items, and
 * the ACPI / platform-driver entry points. */
47 static int fjes_request_irq(struct fjes_adapter *);
48 static void fjes_free_irq(struct fjes_adapter *);
50 static int fjes_open(struct net_device *);
51 static int fjes_close(struct net_device *);
52 static int fjes_setup_resources(struct fjes_adapter *);
53 static void fjes_free_resources(struct fjes_adapter *);
54 static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
55 static void fjes_raise_intr_rxdata_task(struct work_struct *);
56 static void fjes_tx_stall_task(struct work_struct *);
57 static void fjes_force_close_task(struct work_struct *);
58 static irqreturn_t fjes_intr(int, void*);
59 static struct rtnl_link_stats64 *
60 fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
61 static int fjes_change_mtu(struct net_device *, int);
62 static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
63 static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
64 static void fjes_tx_retry(struct net_device *);
66 static int fjes_acpi_add(struct acpi_device *);
67 static int fjes_acpi_remove(struct acpi_device *);
68 static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
70 static int fjes_probe(struct platform_device *);
71 static int fjes_remove(struct platform_device *);
73 static int fjes_sw_init(struct fjes_adapter *);
74 static void fjes_netdev_setup(struct net_device *);
75 static void fjes_irq_watch_task(struct work_struct *);
76 static void fjes_watch_unshare_task(struct work_struct *);
77 static void fjes_rx_irq(struct fjes_adapter *, int);
78 static int fjes_poll(struct napi_struct *, int);
/* ACPI IDs this driver binds to (initializer entries are missing from
 * this excerpt). */
80 static const struct acpi_device_id fjes_acpi_ids[] = {
84 MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
/* ACPI driver: its .add creates the backing platform device, .remove
 * destroys it. */
86 static struct acpi_driver fjes_acpi_driver = {
93 .remove = fjes_acpi_remove,
/* Platform driver bound to the device registered by fjes_acpi_add(). */
97 static struct platform_driver fjes_driver = {
102 .remove = fjes_remove,
/* MMIO + IRQ resources; filled in by fjes_get_acpi_resource() from _CRS. */
105 static struct resource fjes_resource[] = {
107 .flags = IORESOURCE_MEM,
112 .flags = IORESOURCE_IRQ,
/* ACPI .add callback: evaluate the device's _STR object, verify the
 * description starts with FJES_ACPI_SYMBOL, collect MMIO/IRQ resources
 * from _CRS into fjes_resource[], and register the backing platform
 * device.  (Several lines, e.g. early-return bodies, are missing from
 * this excerpt.)
 */
118 static int fjes_acpi_add(struct acpi_device *device)
120 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
121 char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
122 struct platform_device *plat_dev;
123 union acpi_object *str;
127 status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
128 if (ACPI_FAILURE(status))
/* _STR is UTF-16LE; convert to UTF-8 for the prefix compare below. */
131 str = buffer.pointer;
132 result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
133 str->string.length, UTF16_LITTLE_ENDIAN,
134 str_buf, sizeof(str_buf) - 1);
/* Not our device if the description lacks the FJES marker string. */
137 if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
138 kfree(buffer.pointer);
141 kfree(buffer.pointer);
/* Walk _CRS; fjes_get_acpi_resource() fills fjes_resource[0] and [1]. */
143 status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
144 fjes_get_acpi_resource, fjes_resource);
145 if (ACPI_FAILURE(status))
148 /* create platform_device */
149 plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
150 ARRAY_SIZE(fjes_resource));
151 if (IS_ERR(plat_dev))
152 return PTR_ERR(plat_dev);
/* Stash the platform device so fjes_acpi_remove() can unregister it. */
154 device->driver_data = plat_dev;
/* ACPI .remove callback: unregister the platform device created by
 * fjes_acpi_add(). */
159 static int fjes_acpi_remove(struct acpi_device *device)
161 struct platform_device *plat_dev;
163 plat_dev = (struct platform_device *)acpi_driver_data(device);
164 platform_device_unregister(plat_dev);
/* acpi_walk_resources() callback: copy the ADDRESS32 range into res[0]
 * (the MEM resource) and the single IRQ into res[1].  @data is the
 * fjes_resource[] array passed by fjes_acpi_add().
 */
170 fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
172 struct acpi_resource_address32 *addr;
173 struct acpi_resource_irq *irq;
174 struct resource *res = data;
176 switch (acpi_res->type) {
177 case ACPI_RESOURCE_TYPE_ADDRESS32:
178 addr = &acpi_res->data.address32;
179 res[0].start = addr->address.minimum;
180 res[0].end = addr->address.minimum +
181 addr->address.address_length - 1;
184 case ACPI_RESOURCE_TYPE_IRQ:
185 irq = &acpi_res->data.irq;
/* Only a single-interrupt descriptor is accepted. */
186 if (irq->interrupt_count != 1)
188 res[1].start = irq->interrupts[0];
189 res[1].end = irq->interrupts[0];
/* Request the shared device IRQ (skipped when already registered) and
 * start the periodic interrupt-watch delayed work on the control
 * workqueue.
 */
199 static int fjes_request_irq(struct fjes_adapter *adapter)
201 struct net_device *netdev = adapter->netdev;
204 adapter->interrupt_watch_enable = true;
205 if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
206 queue_delayed_work(adapter->control_wq,
207 &adapter->interrupt_watch_task,
208 FJES_IRQ_WATCH_DELAY);
211 if (!adapter->irq_registered) {
212 result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
213 IRQF_SHARED, netdev->name, adapter);
/* irq_registered tracks whether free_irq() is needed on teardown. */
215 adapter->irq_registered = false;
217 adapter->irq_registered = true;
223 static void fjes_free_irq(struct fjes_adapter *adapter)
225 struct fjes_hw *hw = &adapter->hw;
227 adapter->interrupt_watch_enable = false;
228 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
230 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
232 if (adapter->irq_registered) {
233 free_irq(adapter->hw.hw_res.irq, adapter);
234 adapter->irq_registered = false;
/* net_device_ops vector wiring the fjes_* handlers into the net core. */
238 static const struct net_device_ops fjes_netdev_ops = {
239 .ndo_open = fjes_open,
240 .ndo_stop = fjes_close,
241 .ndo_start_xmit = fjes_xmit_frame,
242 .ndo_get_stats64 = fjes_get_stats64,
243 .ndo_change_mtu = fjes_change_mtu,
244 .ndo_tx_timeout = fjes_tx_retry,
245 .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
246 .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
249 /* fjes_open - Called when a network interface is made active */
250 static int fjes_open(struct net_device *netdev)
252 struct fjes_adapter *adapter = netdev_priv(netdev);
253 struct fjes_hw *hw = &adapter->hw;
/* open_guard appears to be set after a failed HW reset in
 * fjes_free_resources(); refuse to open in that state — confirm. */
256 if (adapter->open_guard)
259 result = fjes_setup_resources(adapter);
263 hw->txrx_stop_req_bit = 0;
264 hw->epstop_req_bit = 0;
266 napi_enable(&adapter->napi);
/* Clear any latched interrupt status before unmasking. */
268 fjes_hw_capture_interrupt_status(hw);
270 result = fjes_request_irq(adapter);
274 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
276 netif_tx_start_all_queues(netdev);
277 netif_carrier_on(netdev);
/* Error unwind (the goto labels are missing from this excerpt). */
282 fjes_free_irq(adapter);
283 napi_disable(&adapter->napi);
286 fjes_free_resources(adapter);
290 /* fjes_close - Disables a network interface */
291 static int fjes_close(struct net_device *netdev)
293 struct fjes_adapter *adapter = netdev_priv(netdev);
294 struct fjes_hw *hw = &adapter->hw;
298 netif_tx_stop_all_queues(netdev);
299 netif_carrier_off(netdev);
301 fjes_hw_raise_epstop(hw);
303 napi_disable(&adapter->napi);
/* Clear our rx_status bits in every shared peer's TX epbuf. */
305 spin_lock_irqsave(&hw->rx_status_lock, flags);
306 for (epidx = 0; epidx < hw->max_epid; epidx++) {
307 if (epidx == hw->my_epid)
310 if (fjes_hw_get_partner_ep_status(hw, epidx) ==
312 adapter->hw.ep_shm_info[epidx]
313 .tx.info->v1i.rx_status &=
316 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
318 fjes_free_irq(adapter);
/* Flush every outstanding work item before tearing down resources. */
320 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
321 cancel_work_sync(&adapter->unshare_watch_task);
322 adapter->unshare_watch_bitmask = 0;
323 cancel_work_sync(&adapter->raise_intr_rxdata_task);
324 cancel_work_sync(&adapter->tx_stall_task);
326 cancel_work_sync(&hw->update_zone_task);
327 cancel_work_sync(&hw->epstop_task);
329 fjes_hw_wait_epstop(hw);
331 fjes_free_resources(adapter);
/* Prepare per-EP shared-memory state for fjes_open(): query zone/status
 * info from the hardware, nudge enabled peers to refresh their zone info,
 * then set up each peer's TX epbuf and register same-zone buffer pairs.
 */
336 static int fjes_setup_resources(struct fjes_adapter *adapter)
338 struct net_device *netdev = adapter->netdev;
339 struct ep_share_mem_info *buf_pair;
340 struct fjes_hw *hw = &adapter->hw;
/* hw_info is shared with the command path; serialize with its mutex. */
345 mutex_lock(&hw->hw_info.lock);
346 result = fjes_hw_request_info(hw);
349 for (epidx = 0; epidx < hw->max_epid; epidx++) {
350 hw->ep_shm_info[epidx].es_status =
351 hw->hw_info.res_buf->info.info[epidx].es_status;
352 hw->ep_shm_info[epidx].zone =
353 hw->hw_info.res_buf->info.info[epidx].zone;
/* On failure, force a full reset the next time resources are freed. */
359 adapter->force_reset = true;
361 mutex_unlock(&hw->hw_info.lock);
364 mutex_unlock(&hw->hw_info.lock);
/* Ask every enabled peer endpoint to refresh its zone information. */
366 for (epidx = 0; epidx < (hw->max_epid); epidx++) {
367 if ((epidx != hw->my_epid) &&
368 (hw->ep_shm_info[epidx].es_status ==
369 FJES_ZONING_STATUS_ENABLE)) {
370 fjes_hw_raise_interrupt(hw, epidx,
371 REG_ICTL_MASK_INFO_UPDATE);
/* Give peers time to react before sampling their state. */
375 msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
377 for (epidx = 0; epidx < (hw->max_epid); epidx++) {
378 if (epidx == hw->my_epid)
381 buf_pair = &hw->ep_shm_info[epidx];
383 spin_lock_irqsave(&hw->rx_status_lock, flags);
384 fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
386 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
/* Only same-zone peers get their buffer pair registered with the HW. */
388 if (fjes_hw_epid_is_same_zone(hw, epidx)) {
389 mutex_lock(&hw->hw_info.lock);
391 fjes_hw_register_buff_addr(hw, epidx, buf_pair);
392 mutex_unlock(&hw->hw_info.lock);
400 adapter->force_reset = true;
/* Unregister every peer's shared buffer and reinitialize our TX epbufs;
 * if any unregister failed, or a reset was forced earlier, reset the
 * hardware and reprogram its command registers from hw_info.
 *
 * Fix: "&param" had been corrupted by an HTML-entity pass into the
 * pilcrow character ("¶m") in the memset() and
 * fjes_hw_init_command_registers() calls — restored to "&param".
 */
410 static void fjes_free_resources(struct fjes_adapter *adapter)
411 struct net_device *netdev = adapter->netdev;
412 struct fjes_device_command_param param;
413 struct ep_share_mem_info *buf_pair;
414 struct fjes_hw *hw = &adapter->hw;
415 bool reset_flag = false;
420 for (epidx = 0; epidx < hw->max_epid; epidx++) {
421 if (epidx == hw->my_epid)
424 mutex_lock(&hw->hw_info.lock);
425 result = fjes_hw_unregister_buff_addr(hw, epidx);
426 mutex_unlock(&hw->hw_info.lock);
431 buf_pair = &hw->ep_shm_info[epidx];
/* Reinitialize our TX buffer toward this peer for the current MTU. */
433 spin_lock_irqsave(&hw->rx_status_lock, flags);
434 fjes_hw_setup_epbuf(&buf_pair->tx,
435 netdev->dev_addr, netdev->mtu);
436 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
438 clear_bit(epidx, &hw->txrx_stop_req_bit);
441 if (reset_flag || adapter->force_reset) {
442 result = fjes_hw_reset(hw);
444 adapter->force_reset = false;
447 adapter->open_guard = true;
449 hw->hw_info.buffer_share_bit = 0;
451 memset((void *)&param, 0, sizeof(param));
/* Hand the command/response buffers' physical addresses back to the HW. */
453 param.req_len = hw->hw_info.req_buf_size;
454 param.req_start = __pa(hw->hw_info.req_buf);
455 param.res_len = hw->hw_info.res_buf_size;
456 param.res_start = __pa(hw->hw_info.res_buf);
457 param.share_start = __pa(hw->hw_info.share->ep_status);
459 fjes_hw_init_command_registers(hw, &param);
/* Work item queued when a peer TX ring backs up: wake the queue once every
 * sendable peer ring has room again (polled up to 5 times), or immediately
 * if the stall has lasted longer than FJES_TX_TX_STALL_TIMEOUT; otherwise
 * requeues itself.
 */
463 static void fjes_tx_stall_task(struct work_struct *work)
465 struct fjes_adapter *adapter = container_of(work,
466 struct fjes_adapter, tx_stall_task)
467 struct net_device *netdev = adapter->netdev;
468 struct fjes_hw *hw = &adapter->hw;
469 int all_queue_available, sendable;
470 enum ep_partner_status pstatus;
471 int max_epid, my_epid, epid;
472 union ep_buffer_info *info;
/* Stalled too long: stop waiting and wake the queue anyway. */
476 dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
477 netif_wake_queue(netdev);
481 my_epid = hw->my_epid;
482 max_epid = hw->max_epid;
484 for (i = 0; i < 5; i++) {
485 all_queue_available = 1;
487 for (epid = 0; epid < max_epid; epid++) {
491 pstatus = fjes_hw_get_partner_ep_status(hw, epid);
492 sendable = (pstatus == EP_PARTNER_SHARED);
496 info = adapter->hw.ep_shm_info[epid].tx.info;
/* Peer still mid-MTU-change: don't treat its ring as blocking. */
498 if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
/* A full ring on any sendable peer keeps the queue stopped. */
501 if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
502 info->v1i.count_max)) {
503 all_queue_available = 0;
508 if (all_queue_available) {
509 netif_wake_queue(netdev);
514 usleep_range(50, 100);
/* Still full after 5 polls: requeue ourselves and try again later. */
516 queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
/* Work item that force-closes the interface after a fatal error (e.g. a
 * failed buffer unregister in fjes_watch_unshare_task).  The body is
 * truncated in this excerpt — presumably it closes @netdev; confirm
 * against the full source.
 */
519 static void fjes_force_close_task(struct work_struct *work)
521 struct fjes_adapter *adapter = container_of(work,
522 struct fjes_adapter, force_close_task);
523 struct net_device *netdev = adapter->netdev;
/* Work item that raises an RX-data interrupt toward every shared peer whose
 * TX status was left at FJES_TX_DELAY_SEND_PENDING by fjes_tx_send(),
 * unless that peer is already polling (its FJES_RX_POLL_WORK bit is set).
 */
530 static void fjes_raise_intr_rxdata_task(struct work_struct *work)
532 struct fjes_adapter *adapter = container_of(work,
533 struct fjes_adapter, raise_intr_rxdata_task);
534 struct fjes_hw *hw = &adapter->hw;
535 enum ep_partner_status pstatus;
536 int max_epid, my_epid, epid;
538 my_epid = hw->my_epid;
539 max_epid = hw->max_epid;
541 for (epid = 0; epid < max_epid; epid++)
542 hw->ep_shm_info[epid].tx_status_work = 0;
/* First pass: snapshot each shared peer's tx_status, clearing PENDING. */
544 for (epid = 0; epid < max_epid; epid++) {
548 pstatus = fjes_hw_get_partner_ep_status(hw, epid);
549 if (pstatus == EP_PARTNER_SHARED) {
550 hw->ep_shm_info[epid].tx_status_work =
551 hw->ep_shm_info[epid].tx.info->v1i.tx_status;
553 if (hw->ep_shm_info[epid].tx_status_work ==
554 FJES_TX_DELAY_SEND_PENDING) {
555 hw->ep_shm_info[epid].tx.info->v1i.tx_status =
556 FJES_TX_DELAY_SEND_NONE;
/* Second pass: interrupt peers that had data pending and aren't polling. */
561 for (epid = 0; epid < max_epid; epid++) {
565 pstatus = fjes_hw_get_partner_ep_status(hw, epid);
566 if ((hw->ep_shm_info[epid].tx_status_work ==
567 FJES_TX_DELAY_SEND_PENDING) &&
568 (pstatus == EP_PARTNER_SHARED) &&
569 !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
570 FJES_RX_POLL_WORK)) {
571 fjes_hw_raise_interrupt(hw, epid,
572 REG_ICTL_MASK_RX_DATA);
576 usleep_range(500, 1000);
/* Copy one frame into @dest's shared TX ring and mark delivery as delayed
 * (FJES_TX_DELAY_SEND_PENDING); the raise_intr_rxdata work item later
 * interrupts the peer.  The retval checks / return statement are missing
 * from this excerpt.
 */
579 static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
580 void *data, size_t len)
584 retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
589 adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
590 FJES_TX_DELAY_SEND_PENDING;
/* Batch interrupts: only queue the work if it isn't already pending. */
591 if (!work_pending(&adapter->raise_intr_rxdata_task))
592 queue_work(adapter->txrx_wq,
593 &adapter->raise_intr_rxdata_task);
/* ndo_start_xmit: resolve destination EP(s) from the Ethernet header —
 * multicast fans out over all EPIDs, a locally-administered unicast
 * address 02:00:00:00:00:[epid] selects one EP — then copy the frame into
 * each target's shared TX ring via fjes_tx_send().  Heavily truncated in
 * this excerpt (several branch bodies and the return path are missing).
 */
600 fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
602 struct fjes_adapter *adapter = netdev_priv(netdev);
603 struct fjes_hw *hw = &adapter->hw;
605 int max_epid, my_epid, dest_epid;
606 enum ep_partner_status pstatus;
607 struct netdev_queue *cur_queue;
608 char shortpkt[VLAN_ETH_HLEN];
619 cur_queue = netdev_get_tx_queue(netdev, queue_no);
621 eth = (struct ethhdr *)skb->data;
622 my_epid = hw->my_epid;
624 vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
/* Multicast: iterate every EPID below.  Local-admin addr: single dest. */
629 if (is_multicast_ether_addr(eth->h_dest)) {
631 max_epid = hw->max_epid;
633 } else if (is_local_ether_addr(eth->h_dest)) {
634 dest_epid = eth->h_dest[ETH_ALEN - 1];
635 max_epid = dest_epid + 1;
/* Unicast scheme is 02:00:00:00:00:[epid]; anything else is rejected. */
637 if ((eth->h_dest[0] == 0x02) &&
638 (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
639 eth->h_dest[3] | eth->h_dest[4])) &&
640 (dest_epid < hw->max_epid)) {
647 adapter->stats64.tx_packets += 1;
648 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
649 adapter->stats64.tx_bytes += len;
650 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
657 adapter->stats64.tx_packets += 1;
658 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
659 adapter->stats64.tx_bytes += len;
660 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
663 for (; dest_epid < max_epid; dest_epid++) {
664 if (my_epid == dest_epid)
667 pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
668 if (pstatus != EP_PARTNER_SHARED) {
670 } else if (!fjes_hw_check_epbuf_version(
671 &adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
672 /* version is NOT 0 */
673 adapter->stats64.tx_carrier_errors += 1;
674 hw->ep_shm_info[dest_epid].net_stats
675 .tx_carrier_errors += 1;
678 } else if (!fjes_hw_check_mtu(
679 &adapter->hw.ep_shm_info[dest_epid].rx,
/* Peer MTU mismatch: account as dropped + error. */
681 adapter->stats64.tx_dropped += 1;
682 hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
683 adapter->stats64.tx_errors += 1;
684 hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
688 !fjes_hw_check_vlan_id(
689 &adapter->hw.ep_shm_info[dest_epid].rx,
/* Runt frames are zero-padded up to VLAN_ETH_HLEN before sending. */
693 if (len < VLAN_ETH_HLEN) {
694 memset(shortpkt, 0, VLAN_ETH_HLEN);
695 memcpy(shortpkt, skb->data, skb->len);
/* tx_retry_count/tx_start_jiffies track how long this skb has stalled. */
700 if (adapter->tx_retry_count == 0) {
701 adapter->tx_start_jiffies = jiffies;
702 adapter->tx_retry_count = 1;
704 adapter->tx_retry_count++;
707 if (fjes_tx_send(adapter, dest_epid, data, len)) {
/* Ring full: retry until FJES_TX_RETRY_TIMEOUT, then count fifo errors. */
712 (long)adapter->tx_start_jiffies) >=
713 FJES_TX_RETRY_TIMEOUT) {
714 adapter->stats64.tx_fifo_errors += 1;
715 hw->ep_shm_info[dest_epid].net_stats
716 .tx_fifo_errors += 1;
717 adapter->stats64.tx_errors += 1;
718 hw->ep_shm_info[dest_epid].net_stats
/* Stop the queue and arm the tx_stall work to wake it later. */
723 netif_trans_update(netdev);
724 netif_tx_stop_queue(cur_queue);
726 if (!work_pending(&adapter->tx_stall_task))
727 queue_work(adapter->txrx_wq,
728 &adapter->tx_stall_task);
730 ret = NETDEV_TX_BUSY;
734 adapter->stats64.tx_packets += 1;
735 hw->ep_shm_info[dest_epid].net_stats
737 adapter->stats64.tx_bytes += len;
738 hw->ep_shm_info[dest_epid].net_stats
742 adapter->tx_retry_count = 0;
748 if (ret == NETDEV_TX_OK) {
751 adapter->stats64.tx_packets += 1;
752 hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
/* NOTE(review): "+= 1" here while the adjacent per-EP counter uses
 * "+= len" — looks like a byte-count bug; confirm against full source. */
753 adapter->stats64.tx_bytes += 1;
754 hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
/* ndo_tx_timeout handler: the netdev watchdog fired, so simply wake TX
 * queue 0 and let transmission be retried.
 */
static void fjes_tx_retry(struct net_device *netdev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(netdev, 0));
}
768 static struct rtnl_link_stats64 *
769 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
771 struct fjes_adapter *adapter = netdev_priv(netdev);
773 memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
/* ndo_change_mtu: snap @new_mtu up to the next supported size; if the
 * device is running, quiesce TX/NAPI, reinitialize every peer TX epbuf
 * for the new MTU, then restart everything.
 */
778 static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
780 struct fjes_adapter *adapter = netdev_priv(netdev);
781 bool running = netif_running(netdev);
782 struct fjes_hw *hw = &adapter->hw;
/* fjes_support_mtu[] is zero-terminated; pick the first size >= new_mtu. */
787 for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
788 if (new_mtu <= fjes_support_mtu[idx]) {
789 new_mtu = fjes_support_mtu[idx];
790 if (new_mtu == netdev->mtu)
/* Tell peers an MTU change is in flight by clearing CHANGING_DONE. */
802 spin_lock_irqsave(&hw->rx_status_lock, flags);
803 for (epidx = 0; epidx < hw->max_epid; epidx++) {
804 if (epidx == hw->my_epid)
806 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
807 ~FJES_RX_MTU_CHANGING_DONE;
809 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
811 netif_tx_stop_all_queues(netdev);
812 netif_carrier_off(netdev);
813 cancel_work_sync(&adapter->tx_stall_task);
814 napi_disable(&adapter->napi);
818 netif_tx_stop_all_queues(netdev);
821 netdev->mtu = new_mtu;
/* Rebuild each peer TX buffer for the new MTU and flag completion. */
824 for (epidx = 0; epidx < hw->max_epid; epidx++) {
825 if (epidx == hw->my_epid)
828 spin_lock_irqsave(&hw->rx_status_lock, flags);
829 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
833 hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
834 FJES_RX_MTU_CHANGING_DONE;
835 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
838 netif_tx_wake_all_queues(netdev);
839 netif_carrier_on(netdev);
840 napi_enable(&adapter->napi);
841 napi_schedule(&adapter->napi);
/* ndo_vlan_rx_add_vid: record @vid in every peer TX epbuf that doesn't
 * already carry it; -ENOSPC when the filter table is full.
 */
847 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
848 __be16 proto, u16 vid)
850 struct fjes_adapter *adapter = netdev_priv(netdev);
854 for (epid = 0; epid < adapter->hw.max_epid; epid++) {
855 if (epid == adapter->hw.my_epid)
858 if (!fjes_hw_check_vlan_id(
859 &adapter->hw.ep_shm_info[epid].tx, vid))
860 ret = fjes_hw_set_vlan_id(
861 &adapter->hw.ep_shm_info[epid].tx, vid);
/* ret's declaration/initial value is missing from this excerpt;
 * falsy here maps to "no space left" — confirm. */
864 return ret ? 0 : -ENOSPC;
867 static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
868 __be16 proto, u16 vid)
870 struct fjes_adapter *adapter = netdev_priv(netdev);
873 for (epid = 0; epid < adapter->hw.max_epid; epid++) {
874 if (epid == adapter->hw.my_epid)
877 fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
/* Handle a TXRX-stop request interrupt from @src_epid, dispatching on the
 * current partner state (the switch header and some break lines are
 * missing from this excerpt).
 */
883 static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
886 struct fjes_hw *hw = &adapter->hw;
887 enum ep_partner_status status;
890 status = fjes_hw_get_partner_ep_status(hw, src_epid);
892 case EP_PARTNER_UNSHARE:
893 case EP_PARTNER_COMPLETE:
896 case EP_PARTNER_WAITING:
/* Lower-numbered peer wins: ack the stop and watch for the unshare. */
897 if (src_epid < hw->my_epid) {
898 spin_lock_irqsave(&hw->rx_status_lock, flags);
899 hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
900 FJES_RX_STOP_REQ_DONE;
901 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
903 clear_bit(src_epid, &hw->txrx_stop_req_bit);
904 set_bit(src_epid, &adapter->unshare_watch_bitmask);
906 if (!work_pending(&adapter->unshare_watch_task))
907 queue_work(adapter->control_wq,
908 &adapter->unshare_watch_task);
911 case EP_PARTNER_SHARED:
/* Peer is requesting a stop: flag it for the epstop work item. */
912 if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
913 FJES_RX_STOP_REQ_REQUEST) {
914 set_bit(src_epid, &hw->epstop_req_bit);
915 if (!work_pending(&hw->epstop_task))
916 queue_work(adapter->control_wq,
/* Handle a device-stop request interrupt from @src_epid: always reserve
 * the buffer for unsharing, then act based on the partner state.
 */
923 static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
925 struct fjes_hw *hw = &adapter->hw;
926 enum ep_partner_status status;
929 set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
931 status = fjes_hw_get_partner_ep_status(hw, src_epid);
933 case EP_PARTNER_WAITING:
/* Ack the stop request; appears to fall through to the unshare path
 * (the separator line is missing from this excerpt — confirm). */
934 spin_lock_irqsave(&hw->rx_status_lock, flags);
935 hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
936 FJES_RX_STOP_REQ_DONE;
937 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
938 clear_bit(src_epid, &hw->txrx_stop_req_bit);
940 case EP_PARTNER_UNSHARE:
941 case EP_PARTNER_COMPLETE:
943 set_bit(src_epid, &adapter->unshare_watch_bitmask);
944 if (!work_pending(&adapter->unshare_watch_task))
945 queue_work(adapter->control_wq,
946 &adapter->unshare_watch_task);
948 case EP_PARTNER_SHARED:
949 set_bit(src_epid, &hw->epstop_req_bit);
951 if (!work_pending(&hw->epstop_task))
952 queue_work(adapter->control_wq, &hw->epstop_task);
957 static void fjes_update_zone_irq(struct fjes_adapter *adapter,
960 struct fjes_hw *hw = &adapter->hw;
962 if (!work_pending(&hw->update_zone_task))
963 queue_work(adapter->control_wq, &hw->update_zone_task);
/* Shared IRQ handler: read the latched interrupt status and fan out to the
 * per-cause handlers.  The IRQ_HANDLED/IRQ_NONE return paths are missing
 * from this excerpt.
 */
966 static irqreturn_t fjes_intr(int irq, void *data)
968 struct fjes_adapter *adapter = data;
969 struct fjes_hw *hw = &adapter->hw;
973 icr = fjes_hw_capture_interrupt_status(hw);
/* Only act when the assert bit confirms this device raised the IRQ. */
975 if (icr & REG_IS_MASK_IS_ASSERT) {
976 if (icr & REG_ICTL_MASK_RX_DATA)
977 fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
979 if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
980 fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
982 if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
983 fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
985 if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
986 fjes_hw_set_irqmask(hw,
987 REG_ICTL_MASK_TXRX_STOP_DONE, true);
989 if (icr & REG_ICTL_MASK_INFO_UPDATE)
990 fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
/* Round-robin scan, starting after @start_epid, for a shared peer whose RX
 * ring has data; callers treat a return >= 0 as "found" (the return
 * statements themselves are missing from this excerpt).
 */
1000 static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
1003 struct fjes_hw *hw = &adapter->hw;
1004 enum ep_partner_status pstatus;
1005 int max_epid, cur_epid;
1008 max_epid = hw->max_epid;
/* Normalize so the scan begins at the EP after the last one serviced. */
1009 start_epid = (start_epid + 1 + max_epid) % max_epid;
1011 for (i = 0; i < max_epid; i++) {
1012 cur_epid = (start_epid + i) % max_epid;
1013 if (cur_epid == hw->my_epid)
1016 pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
1017 if (pstatus == EP_PARTNER_SHARED) {
1018 if (!fjes_hw_epbuf_rx_is_empty(
1019 &hw->ep_shm_info[cur_epid].rx))
/* Return a pointer to the next pending RX frame, setting *psize to its
 * length and *cur_epid to the endpoint it was read from.  The not-found
 * path is missing from this excerpt.
 */
1026 static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
1031 *cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
1036 fjes_hw_epbuf_rx_curpkt_get_addr(
1037 &adapter->hw.ep_shm_info[*cur_epid].rx, psize);
/* Drop the current packet from @cur_epid's shared RX ring so the next
 * fjes_rxframe_get() returns the following frame. */
1042 static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
1044 fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
1047 static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
1049 struct fjes_hw *hw = &adapter->hw;
1051 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
1053 adapter->unset_rx_last = true;
1054 napi_schedule(&adapter->napi);
/* NAPI poll: advertise FJES_RX_POLL_WORK to shared peers, drain up to
 * @budget frames from the peer RX rings into the stack, then on completion
 * clear the poll flag and re-enable the RX-data interrupt.
 */
1057 static int fjes_poll(struct napi_struct *napi, int budget)
1059 struct fjes_adapter *adapter =
1060 container_of(napi, struct fjes_adapter, napi);
1061 struct net_device *netdev = napi->dev;
1062 struct fjes_hw *hw = &adapter->hw;
1063 struct sk_buff *skb;
/* Tell shared peers we're actively polling (suppresses their RX-data
 * interrupts — see fjes_raise_intr_rxdata_task). */
1070 spin_lock(&hw->rx_status_lock);
1071 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1072 if (epidx == hw->my_epid)
1075 if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1077 adapter->hw.ep_shm_info[epidx]
1078 .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
1080 spin_unlock(&hw->rx_status_lock);
1082 while (work_done < budget) {
1083 prefetch(&adapter->hw);
1084 frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
1087 skb = napi_alloc_skb(napi, frame_len);
/* skb allocation failure: account the drop and fall to the release. */
1089 adapter->stats64.rx_dropped += 1;
1090 hw->ep_shm_info[cur_epid].net_stats
1092 adapter->stats64.rx_errors += 1;
1093 hw->ep_shm_info[cur_epid].net_stats
1096 memcpy(skb_put(skb, frame_len),
1098 skb->protocol = eth_type_trans(skb, netdev);
1099 skb->ip_summed = CHECKSUM_UNNECESSARY;
1101 netif_receive_skb(skb);
1105 adapter->stats64.rx_packets += 1;
1106 hw->ep_shm_info[cur_epid].net_stats
1108 adapter->stats64.rx_bytes += frame_len;
1109 hw->ep_shm_info[cur_epid].net_stats
1110 .rx_bytes += frame_len;
1112 if (is_multicast_ether_addr(
1113 ((struct ethhdr *)frame)->h_dest)) {
1114 adapter->stats64.multicast += 1;
1115 hw->ep_shm_info[cur_epid].net_stats
/* Consumed (or dropped): advance the peer's RX ring. */
1120 fjes_rxframe_release(adapter, cur_epid);
1121 adapter->unset_rx_last = true;
1127 if (work_done < budget) {
1128 napi_complete(napi);
1130 if (adapter->unset_rx_last) {
1131 adapter->rx_last_jiffies = jiffies;
1132 adapter->unset_rx_last = false;
/* Traffic seen within the last ~3 jiffies: keep polling a bit longer. */
1135 if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
1136 napi_reschedule(napi);
1138 spin_lock(&hw->rx_status_lock);
1139 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1140 if (epidx == hw->my_epid)
1142 if (fjes_hw_get_partner_ep_status(hw, epidx) ==
1144 adapter->hw.ep_shm_info[epidx].tx
1145 .info->v1i.rx_status &=
1148 spin_unlock(&hw->rx_status_lock);
/* Done polling: let the hardware interrupt us again for new RX data. */
1150 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
1157 /* fjes_probe - Device Initialization Routine */
/* Allocate the net_device + adapter, create the txrx/control workqueues,
 * map the platform MEM/IRQ resources into hw, initialize the hardware,
 * assign the 02:00:00:00:00:[epid] MAC, and register the netdev.  Some
 * error checks and labels are missing from this excerpt.
 */
1158 static int fjes_probe(struct platform_device *plat_dev)
1160 struct fjes_adapter *adapter;
1161 struct net_device *netdev;
1162 struct resource *res;
1167 netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
1168 NET_NAME_UNKNOWN, fjes_netdev_setup,
1174 SET_NETDEV_DEV(netdev, &plat_dev->dev);
1176 dev_set_drvdata(&plat_dev->dev, netdev);
1177 adapter = netdev_priv(netdev);
1178 adapter->netdev = netdev;
1179 adapter->plat_dev = plat_dev;
1183 /* setup the private structure */
1184 err = fjes_sw_init(adapter);
1186 goto err_free_netdev;
1188 INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
1189 adapter->force_reset = false;
1190 adapter->open_guard = false;
/* WQ_MEM_RECLAIM: the txrx queue services the data path. */
1192 adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
1193 if (unlikely(!adapter->txrx_wq)) {
1195 goto err_free_netdev;
1198 adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
1200 if (unlikely(!adapter->control_wq)) {
1202 goto err_free_txrx_wq;
1205 INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
1206 INIT_WORK(&adapter->raise_intr_rxdata_task,
1207 fjes_raise_intr_rxdata_task);
1208 INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
1209 adapter->unshare_watch_bitmask = 0;
1211 INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
1212 adapter->interrupt_watch_enable = false;
1214 res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
1217 goto err_free_control_wq;
1219 hw->hw_res.start = res->start;
1220 hw->hw_res.size = resource_size(res);
1221 hw->hw_res.irq = platform_get_irq(plat_dev, 0);
1222 if (hw->hw_res.irq < 0) {
1223 err = hw->hw_res.irq;
1224 goto err_free_control_wq;
1227 err = fjes_hw_init(&adapter->hw);
1229 goto err_free_control_wq;
1231 /* setup MAC address (02:00:00:00:00:[epid])*/
1232 netdev->dev_addr[0] = 2;
1233 netdev->dev_addr[1] = 0;
1234 netdev->dev_addr[2] = 0;
1235 netdev->dev_addr[3] = 0;
1236 netdev->dev_addr[4] = 0;
1237 netdev->dev_addr[5] = hw->my_epid; /* EPID */
1239 err = register_netdev(netdev);
1243 netif_carrier_off(netdev);
/* Error unwind in reverse acquisition order (labels partly missing). */
1248 fjes_hw_exit(&adapter->hw);
1249 err_free_control_wq:
1250 destroy_workqueue(adapter->control_wq);
1252 destroy_workqueue(adapter->txrx_wq);
1254 free_netdev(netdev);
1259 /* fjes_remove - Device Removal Routine */
/* Tear down in roughly the reverse of probe: cancel deferred work,
 * destroy the workqueues, unregister the netdev, delete NAPI, and free
 * the net_device. */
1260 static int fjes_remove(struct platform_device *plat_dev)
1262 struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
1263 struct fjes_adapter *adapter = netdev_priv(netdev);
1264 struct fjes_hw *hw = &adapter->hw;
1266 cancel_delayed_work_sync(&adapter->interrupt_watch_task);
1267 cancel_work_sync(&adapter->unshare_watch_task);
1268 cancel_work_sync(&adapter->raise_intr_rxdata_task);
1269 cancel_work_sync(&adapter->tx_stall_task);
1270 if (adapter->control_wq)
1271 destroy_workqueue(adapter->control_wq);
1272 if (adapter->txrx_wq)
1273 destroy_workqueue(adapter->txrx_wq);
1275 unregister_netdev(netdev);
1279 netif_napi_del(&adapter->napi);
1281 free_netdev(netdev);
1286 static int fjes_sw_init(struct fjes_adapter *adapter)
1288 struct net_device *netdev = adapter->netdev;
1290 netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
1295 /* fjes_netdev_setup - netdevice initialization routine */
1296 static void fjes_netdev_setup(struct net_device *netdev)
1298 ether_setup(netdev);
1300 netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
1301 netdev->netdev_ops = &fjes_netdev_ops;
1302 fjes_set_ethtool_ops(netdev);
/* Default MTU is the 4th entry of the supported-MTU table. */
1303 netdev->mtu = fjes_support_mtu[3];
1304 netdev->flags |= IFF_BROADCAST;
1305 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
/* Periodic watchdog that manually invokes the IRQ handler to catch missed
 * interrupts, kicks NAPI if RX data is waiting, then reschedules itself
 * while watching is enabled.  (The matching local_irq_enable() is outside
 * this excerpt — confirm.)
 */
1308 static void fjes_irq_watch_task(struct work_struct *work)
1310 struct fjes_adapter *adapter = container_of(to_delayed_work(work),
1311 struct fjes_adapter, interrupt_watch_task);
/* Call the ISR with IRQs off, as if it had fired in hardware. */
1313 local_irq_disable();
1314 fjes_intr(adapter->hw.hw_res.irq, adapter);
1317 if (fjes_rxframe_search_exist(adapter, 0) >= 0)
1318 napi_schedule(&adapter->napi);
1320 if (adapter->interrupt_watch_enable) {
1321 if (!delayed_work_pending(&adapter->interrupt_watch_task))
1322 queue_delayed_work(adapter->control_wq,
1323 &adapter->interrupt_watch_task,
1324 FJES_IRQ_WATCH_DELAY);
/* Work item that waits (bounded by wait_time < 3000; the increment is not
 * visible in this excerpt) for peers flagged in unshare_watch_bitmask /
 * txrx_stop_req_bit to finish unsharing, unregistering their buffers and
 * reinitializing the local TX epbufs; any unregister failure schedules the
 * force_close work.  Heavily truncated excerpt.
 */
1329 static void fjes_watch_unshare_task(struct work_struct *work)
1330 struct fjes_adapter *adapter =
1331 container_of(work, struct fjes_adapter, unshare_watch_task);
1333 struct net_device *netdev = adapter->netdev;
1334 struct fjes_hw *hw = &adapter->hw;
1336 int unshare_watch, unshare_reserve;
1337 int max_epid, my_epid, epidx;
1338 int stop_req, stop_req_done;
1339 ulong unshare_watch_bitmask;
1340 unsigned long flags;
1345 my_epid = hw->my_epid;
1346 max_epid = hw->max_epid;
/* Consume the pending bitmask; new requests re-queue this work. */
1348 unshare_watch_bitmask = adapter->unshare_watch_bitmask;
1349 adapter->unshare_watch_bitmask = 0;
1351 while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
1352 (wait_time < 3000)) {
1353 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1354 if (epidx == hw->my_epid)
1357 is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
1360 stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
1362 stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
1363 FJES_RX_STOP_REQ_DONE;
1365 unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
1367 unshare_reserve = test_bit(epidx,
1368 &hw->hw_info.buffer_unshare_reserve_bit);
/* NOTE(review): "is_shared && (!is_shared || !stop_req_done)" — the
 * "!is_shared" arm is dead, so this reduces to
 * "is_shared && !stop_req_done"; confirm intent vs. the full source. */
1371 (is_shared && (!is_shared || !stop_req_done))) &&
1372 (is_shared || !unshare_watch || !unshare_reserve))
1375 mutex_lock(&hw->hw_info.lock);
1376 ret = fjes_hw_unregister_buff_addr(hw, epidx);
/* Unregister failure: force a reset via the force_close work. */
1384 &adapter->force_close_task)) {
1385 adapter->force_reset = true;
1387 &adapter->force_close_task);
1391 mutex_unlock(&hw->hw_info.lock);
1393 spin_lock_irqsave(&hw->rx_status_lock, flags);
1394 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1395 netdev->dev_addr, netdev->mtu);
1396 spin_unlock_irqrestore(&hw->rx_status_lock, flags);
1398 clear_bit(epidx, &hw->txrx_stop_req_bit);
1399 clear_bit(epidx, &unshare_watch_bitmask);
1401 &hw->hw_info.buffer_unshare_reserve_bit);
/* Second pass: force-unshare anything still reserved after the wait. */
1408 if (hw->hw_info.buffer_unshare_reserve_bit) {
1409 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1410 if (epidx == hw->my_epid)
1414 &hw->hw_info.buffer_unshare_reserve_bit)) {
1415 mutex_lock(&hw->hw_info.lock);
1417 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1425 &adapter->force_close_task)) {
1426 adapter->force_reset = true;
1428 &adapter->force_close_task);
1432 mutex_unlock(&hw->hw_info.lock);
1434 spin_lock_irqsave(&hw->rx_status_lock, flags);
1435 fjes_hw_setup_epbuf(
1436 &hw->ep_shm_info[epidx].tx,
1437 netdev->dev_addr, netdev->mtu);
1438 spin_unlock_irqrestore(&hw->rx_status_lock,
1441 clear_bit(epidx, &hw->txrx_stop_req_bit);
1442 clear_bit(epidx, &unshare_watch_bitmask);
1443 clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1446 if (test_bit(epidx, &unshare_watch_bitmask)) {
1447 spin_lock_irqsave(&hw->rx_status_lock, flags);
1448 hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
1449 ~FJES_RX_STOP_REQ_DONE;
1450 spin_unlock_irqrestore(&hw->rx_status_lock,
1457 /* fjes_init_module - Driver Registration Routine */
/* Registers the platform driver first, then the ACPI driver whose .add
 * creates the platform device. */
1458 static int __init fjes_init_module(void)
1462 pr_info("%s - version %s - %s\n",
1463 fjes_driver_string, fjes_driver_version, fjes_copyright);
1465 result = platform_driver_register(&fjes_driver);
1469 result = acpi_bus_register_driver(&fjes_acpi_driver);
1471 goto fail_acpi_driver;
/* Unwind: drop the platform driver if ACPI registration failed. */
1476 platform_driver_unregister(&fjes_driver);
1480 module_init(fjes_init_module);
1482 /* fjes_exit_module - Driver Exit Cleanup Routine */
/* Reverse of init: the ACPI driver is unregistered first (its .add created
 * the platform devices), then the platform driver itself. */
1483 static void __exit fjes_exit_module(void)
1485 acpi_bus_unregister_driver(&fjes_acpi_driver);
1486 platform_driver_unregister(&fjes_driver);
1489 module_exit(fjes_exit_module);