GNU Linux-libre 4.9.337-gnu1
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *          Ravi Patel <rapatel@apm.com>
6  *          Keyur Chudgar <kchudgar@apm.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include <linux/gpio.h>
23 #include "xgene_enet_main.h"
24 #include "xgene_enet_hw.h"
25 #include "xgene_enet_sgmac.h"
26 #include "xgene_enet_xgmac.h"
27
28 #define RES_ENET_CSR    0
29 #define RES_RING_CSR    1
30 #define RES_RING_CMD    2
31
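/* Pre-fill each 16-byte buffer-pool descriptor with its slot index, the
 * free-pool queue number (dst_ring_num) and the stash field; the DMA
 * buffer address is filled in later by xgene_enet_refill_bufpool().
 */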
32 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
33 {
34         struct xgene_enet_raw_desc16 *raw_desc;
35         int i;
36
37         for (i = 0; i < buf_pool->slots; i++) {
38                 raw_desc = &buf_pool->raw_desc16[i];
39
40                 /* Hardware expects descriptor in little endian format */
41                 raw_desc->m0 = cpu_to_le64(i |
42                                 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
43                                 SET_VAL(STASH, 3));
44         }
45 }
46
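/* Allocate and DMA-map up to @nbuf receive skbs, record them in rx_skb[],
 * write their address/length into the ring descriptors and notify the
 * hardware through ring_ops->wr_cmd().
 */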
47 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
48                                      u32 nbuf)
49 {
50         struct sk_buff *skb;
51         struct xgene_enet_raw_desc16 *raw_desc;
52         struct xgene_enet_pdata *pdata;
53         struct net_device *ndev;
54         struct device *dev;
55         dma_addr_t dma_addr;
56         u32 tail = buf_pool->tail;
57         u32 slots = buf_pool->slots - 1;
58         u16 bufdatalen, len;
59         int i;
60
61         ndev = buf_pool->ndev;
62         dev = ndev_to_dev(buf_pool->ndev);
63         pdata = netdev_priv(ndev);
64         bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
65         len = XGENE_ENET_MAX_MTU;
66
67         for (i = 0; i < nbuf; i++) {
68                 raw_desc = &buf_pool->raw_desc16[tail];
69
70                 skb = netdev_alloc_skb_ip_align(ndev, len);
71                 if (unlikely(!skb))
72                         return -ENOMEM;
73
74                 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
75                 if (dma_mapping_error(dev, dma_addr)) {
76                         netdev_err(ndev, "DMA mapping error\n");
77                         dev_kfree_skb_any(skb);
78                         return -EINVAL;
79                 }
80
81                 buf_pool->rx_skb[tail] = skb;
82
83                 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
84                                            SET_VAL(BUFDATALEN, bufdatalen) |
85                                            SET_BIT(COHERENT));
86                 tail = (tail + 1) & slots;
87         }
88
89         pdata->ring_ops->wr_cmd(buf_pool, nbuf);
90         buf_pool->tail = tail;
91
92         return 0;
93 }
94
95 static u8 xgene_enet_hdr_len(const void *data)
96 {
97         const struct ethhdr *eth = data;
98
99         return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
100 }
101
102 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
103 {
104         struct device *dev = ndev_to_dev(buf_pool->ndev);
105         struct xgene_enet_raw_desc16 *raw_desc;
106         dma_addr_t dma_addr;
107         int i;
108
109         /* Free up the buffers held by hardware */
110         for (i = 0; i < buf_pool->slots; i++) {
111                 if (buf_pool->rx_skb[i]) {
112                         dev_kfree_skb_any(buf_pool->rx_skb[i]);
113
114                         raw_desc = &buf_pool->raw_desc16[i];
115                         dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
116                         dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
117                                          DMA_FROM_DEVICE);
118                 }
119         }
120 }
121
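/* Per-ring RX/completion interrupt: the line is disabled here (it is
 * marked IRQ_DISABLE_UNLAZY at request time) and re-enabled from the
 * NAPI handler once the ring has been drained.
 */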
122 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
123 {
124         struct xgene_enet_desc_ring *rx_ring = data;
125
126         if (napi_schedule_prep(&rx_ring->napi)) {
127                 disable_irq_nosync(irq);
128                 __napi_schedule(&rx_ring->napi);
129         }
130
131         return IRQ_HANDLED;
132 }
133
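/* Reclaim a transmitted skb: unmap the head and all fragment buffers,
 * drop the TSO MSS slot reference if one was used (ET bit set in m3),
 * check the LERR status code and free the skb.
 */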
134 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
135                                     struct xgene_enet_raw_desc *raw_desc)
136 {
137         struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
138         struct sk_buff *skb;
139         struct device *dev;
140         skb_frag_t *frag;
141         dma_addr_t *frag_dma_addr;
142         u16 skb_index;
143         u8 status;
144         int i, ret = 0;
145         u8 mss_index;
146
147         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
148         skb = cp_ring->cp_skb[skb_index];
149         frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
150
151         dev = ndev_to_dev(cp_ring->ndev);
152         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
153                          skb_headlen(skb),
154                          DMA_TO_DEVICE);
155
156         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
157                 frag = &skb_shinfo(skb)->frags[i];
158                 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
159                                DMA_TO_DEVICE);
160         }
161
162         if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
163                 mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
164                 spin_lock(&pdata->mss_lock);
165                 pdata->mss_refcnt[mss_index]--;
166                 spin_unlock(&pdata->mss_lock);
167         }
168
169         /* Checking for error */
170         status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
171         if (unlikely(status > 2)) {
172                 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
173                                        status);
174                 ret = -EIO;
175         }
176
177         if (likely(skb)) {
178                 dev_kfree_skb_any(skb);
179         } else {
180                 netdev_err(cp_ring->ndev, "completion skb is NULL\n");
181                 ret = -EIO;
182         }
183
184         return ret;
185 }
186
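/* Pick one of the NUM_MSS_REG hardware MSS slots for a TSO packet:
 * reuse a slot already programmed with the same MSS, otherwise program
 * a slot whose ref_count is zero. Returns the slot index or -EBUSY.
 */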
187 static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
188 {
189         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
190         bool mss_index_found = false;
191         int mss_index;
192         int i;
193
194         spin_lock(&pdata->mss_lock);
195
196         /* Reuse the slot if MSS matches */
197         for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
198                 if (pdata->mss[i] == mss) {
199                         pdata->mss_refcnt[i]++;
200                         mss_index = i;
201                         mss_index_found = true;
202                 }
203         }
204
205         /* No match: claim a slot whose ref_count is 0 and program the new MSS */
206         for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
207                 if (!pdata->mss_refcnt[i]) {
208                         pdata->mss_refcnt[i]++;
209                         pdata->mac_ops->set_mss(pdata, mss, i);
210                         pdata->mss[i] = mss;
211                         mss_index = i;
212                         mss_index_found = true;
213                 }
214         }
215
216         spin_unlock(&pdata->mss_lock);
217
218         /* No slots with ref_count = 0 available, return busy */
219         if (!mss_index_found)
220                 return -EBUSY;
221
222         return mss_index;
223 }
224
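/* Build the hardware work-message flags for a transmit skb: Ethernet, IP
 * and TCP/UDP header lengths, checksum-enable and protocol bits, plus the
 * MSS slot index when TSO is used. The result is OR-ed into *hopinfo,
 * which ends up in descriptor field m3.
 */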
225 static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
226 {
227         struct net_device *ndev = skb->dev;
228         struct iphdr *iph;
229         u8 l3hlen = 0, l4hlen = 0;
230         u8 ethhdr, proto = 0, csum_enable = 0;
231         u32 hdr_len, mss = 0;
232         u32 i, len, nr_frags;
233         int mss_index;
234
235         ethhdr = xgene_enet_hdr_len(skb->data);
236
237         if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
238             unlikely(skb->protocol != htons(ETH_P_8021Q)))
239                 goto out;
240
241         if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
242                 goto out;
243
244         iph = ip_hdr(skb);
245         if (unlikely(ip_is_fragment(iph)))
246                 goto out;
247
248         if (likely(iph->protocol == IPPROTO_TCP)) {
249                 l4hlen = tcp_hdrlen(skb) >> 2;
250                 csum_enable = 1;
251                 proto = TSO_IPPROTO_TCP;
252                 if (ndev->features & NETIF_F_TSO) {
253                         hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
254                         mss = skb_shinfo(skb)->gso_size;
255
256                         if (skb_is_nonlinear(skb)) {
257                                 len = skb_headlen(skb);
258                                 nr_frags = skb_shinfo(skb)->nr_frags;
259
260                                 for (i = 0; i < 2 && i < nr_frags; i++)
261                                         len += skb_shinfo(skb)->frags[i].size;
262
263                                 /* HW requires the headers to reside within the first 3 buffers */
264                                 if (unlikely(hdr_len > len)) {
265                                         if (skb_linearize(skb))
266                                                 return 0;
267                                 }
268                         }
269
270                         if (!mss || ((skb->len - hdr_len) <= mss))
271                                 goto out;
272
273                         mss_index = xgene_enet_setup_mss(ndev, mss);
274                         if (unlikely(mss_index < 0))
275                                 return -EBUSY;
276
277                         *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
278                 }
279         } else if (iph->protocol == IPPROTO_UDP) {
280                 l4hlen = UDP_HDR_SIZE;
281                 csum_enable = 1;
282         }
283 out:
284         l3hlen = ip_hdrlen(skb) >> 2;
285         *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
286                     SET_VAL(IPHDR, l3hlen) |
287                     SET_VAL(ETHHDR, ethhdr) |
288                     SET_VAL(EC, csum_enable) |
289                     SET_VAL(IS, proto) |
290                     SET_BIT(IC) |
291                     SET_BIT(TYPE_ETH_WORK_MESSAGE);
292
293         return 0;
294 }
295
296 static u16 xgene_enet_encode_len(u16 len)
297 {
298         return (len == BUFLEN_16K) ? 0 : len;
299 }
300
301 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
302 {
303         desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
304                                     SET_VAL(BUFDATALEN, len));
305 }
306
307 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
308 {
309         __le64 *exp_bufs;
310
311         exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
312         memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
313         ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
314
315         return exp_bufs;
316 }
317
318 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
319 {
320         return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
321 }
322
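/* Map an skb into the transmit ring: one descriptor for the linear part
 * and, for non-linear skbs, an expanded descriptor holding up to four more
 * buffers, with remaining fragments (and >16K fragments, which are split)
 * spilled into an external buffer list (LL). Returns the number of ring
 * slots consumed.
 */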
323 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
324                                     struct sk_buff *skb)
325 {
326         struct device *dev = ndev_to_dev(tx_ring->ndev);
327         struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
328         struct xgene_enet_raw_desc *raw_desc;
329         __le64 *exp_desc = NULL, *exp_bufs = NULL;
330         dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
331         skb_frag_t *frag;
332         u16 tail = tx_ring->tail;
333         u64 hopinfo = 0;
334         u32 len, hw_len;
335         u8 ll = 0, nv = 0, idx = 0;
336         bool split = false;
337         u32 size, offset, ell_bytes = 0;
338         u32 i, fidx, nr_frags, count = 1;
339         int ret;
340
341         raw_desc = &tx_ring->raw_desc[tail];
342         tail = (tail + 1) & (tx_ring->slots - 1);
343         memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
344
345         ret = xgene_enet_work_msg(skb, &hopinfo);
346         if (ret)
347                 return ret;
348
349         raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
350                                    hopinfo);
351
352         len = skb_headlen(skb);
353         hw_len = xgene_enet_encode_len(len);
354
355         dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
356         if (dma_mapping_error(dev, dma_addr)) {
357                 netdev_err(tx_ring->ndev, "DMA mapping error\n");
358                 return -EINVAL;
359         }
360
361         /* Hardware expects descriptor in little endian format */
362         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
363                                    SET_VAL(BUFDATALEN, hw_len) |
364                                    SET_BIT(COHERENT));
365
366         if (!skb_is_nonlinear(skb))
367                 goto out;
368
369         /* scatter gather */
370         nv = 1;
371         exp_desc = (void *)&tx_ring->raw_desc[tail];
372         tail = (tail + 1) & (tx_ring->slots - 1);
373         memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
374
375         nr_frags = skb_shinfo(skb)->nr_frags;
376         for (i = nr_frags; i < 4; i++)
377                 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
378
379         frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
380
381         for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
382                 if (!split) {
383                         frag = &skb_shinfo(skb)->frags[fidx];
384                         size = skb_frag_size(frag);
385                         offset = 0;
386
387                         pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
388                                                      DMA_TO_DEVICE);
389                         if (dma_mapping_error(dev, pbuf_addr))
390                                 return -EINVAL;
391
392                         frag_dma_addr[fidx] = pbuf_addr;
393                         fidx++;
394
395                         if (size > BUFLEN_16K)
396                                 split = true;
397                 }
398
399                 if (size > BUFLEN_16K) {
400                         len = BUFLEN_16K;
401                         size -= BUFLEN_16K;
402                 } else {
403                         len = size;
404                         split = false;
405                 }
406
407                 dma_addr = pbuf_addr + offset;
408                 hw_len = xgene_enet_encode_len(len);
409
410                 switch (i) {
411                 case 0:
412                 case 1:
413                 case 2:
414                         xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
415                         break;
416                 case 3:
417                         if (split || (fidx != nr_frags)) {
418                                 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
419                                 xgene_set_addr_len(exp_bufs, idx, dma_addr,
420                                                    hw_len);
421                                 idx++;
422                                 ell_bytes += len;
423                         } else {
424                                 xgene_set_addr_len(exp_desc, i, dma_addr,
425                                                    hw_len);
426                         }
427                         break;
428                 default:
429                         xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
430                         idx++;
431                         ell_bytes += len;
432                         break;
433                 }
434
435                 if (split)
436                         offset += BUFLEN_16K;
437         }
438         count++;
439
440         if (idx) {
441                 ll = 1;
442                 dma_addr = dma_map_single(dev, exp_bufs,
443                                           sizeof(u64) * MAX_EXP_BUFFS,
444                                           DMA_TO_DEVICE);
445                 if (dma_mapping_error(dev, dma_addr)) {
446                         dev_kfree_skb_any(skb);
447                         return -EINVAL;
448                 }
449                 i = ell_bytes >> LL_BYTES_LSB_LEN;
450                 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
451                                           SET_VAL(LL_BYTES_MSB, i) |
452                                           SET_VAL(LL_LEN, idx));
453                 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
454         }
455
456 out:
457         raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
458                                    SET_VAL(USERINFO, tx_ring->tail));
459         tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
460         pdata->tx_level[tx_ring->cp_ring->index] += count;
461         tx_ring->tail = tail;
462
463         return count;
464 }
465
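/* ndo_start_xmit: apply software flow control by comparing the per-queue
 * tx_level and txc_level counters (with wraparound handling), pad short
 * frames, build the descriptors and notify the ring via wr_cmd().
 */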
466 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
467                                          struct net_device *ndev)
468 {
469         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
470         struct xgene_enet_desc_ring *tx_ring;
471         int index = skb->queue_mapping;
472         u32 tx_level = pdata->tx_level[index];
473         int count;
474
475         tx_ring = pdata->tx_ring[index];
476         if (tx_level < pdata->txc_level[index])
477                 tx_level += ((typeof(pdata->tx_level[index]))~0U);
478
479         if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
480                 netif_stop_subqueue(ndev, index);
481                 return NETDEV_TX_BUSY;
482         }
483
484         if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
485                 return NETDEV_TX_OK;
486
487         count = xgene_enet_setup_tx_desc(tx_ring, skb);
488         if (count == -EBUSY)
489                 return NETDEV_TX_BUSY;
490
491         if (count <= 0) {
492                 dev_kfree_skb_any(skb);
493                 return NETDEV_TX_OK;
494         }
495
496         skb_tx_timestamp(skb);
497
498         tx_ring->tx_packets++;
499         tx_ring->tx_bytes += skb->len;
500
501         pdata->ring_ops->wr_cmd(tx_ring, count);
502         return NETDEV_TX_OK;
503 }
504
505 static void xgene_enet_rx_csum(struct sk_buff *skb)
506 {
507         struct net_device *ndev = skb->dev;
508         struct iphdr *iph = ip_hdr(skb);
509
510         if (!(ndev->features & NETIF_F_RXCSUM))
511                 return;
512
513         if (skb->protocol != htons(ETH_P_IP))
514                 return;
515
516         if (ip_is_fragment(iph))
517                 return;
518
519         if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
520                 return;
521
522         skb->ip_summed = CHECKSUM_UNNECESSARY;
523 }
524
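/* Handle one received frame: unmap its buffer, check the ELERR/LERR
 * status, trim the 4-byte CRC that the hardware leaves in place, apply
 * RX checksum offload and pass the skb to GRO. The buffer pool is
 * refilled once every NUM_BUFPOOL frames.
 */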
525 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
526                                struct xgene_enet_raw_desc *raw_desc)
527 {
528         struct net_device *ndev;
529         struct device *dev;
530         struct xgene_enet_desc_ring *buf_pool;
531         u32 datalen, skb_index;
532         struct sk_buff *skb;
533         u8 status;
534         int ret = 0;
535
536         ndev = rx_ring->ndev;
537         dev = ndev_to_dev(rx_ring->ndev);
538         buf_pool = rx_ring->buf_pool;
539
540         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
541                          XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
542         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
543         skb = buf_pool->rx_skb[skb_index];
544         buf_pool->rx_skb[skb_index] = NULL;
545
546         /* checking for error */
547         status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
548                   GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
549         if (unlikely(status)) {
550                 dev_kfree_skb_any(skb);
551                 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
552                                        status);
553                 ret = -EIO;
554                 goto out;
555         }
556
557         /* strip off the CRC, as the HW does not strip it */
558         datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
559         datalen = (datalen & DATALEN_MASK) - 4;
560         prefetch(skb->data - NET_IP_ALIGN);
561         skb_put(skb, datalen);
562
563         skb_checksum_none_assert(skb);
564         skb->protocol = eth_type_trans(skb, ndev);
565         xgene_enet_rx_csum(skb);
566
567         rx_ring->rx_packets++;
568         rx_ring->rx_bytes += datalen;
569         napi_gro_receive(&rx_ring->napi, skb);
570 out:
571         if (--rx_ring->nbufpool == 0) {
572                 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
573                 rx_ring->nbufpool = NUM_BUFPOOL;
574         }
575
576         return ret;
577 }
578
579 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
580 {
581         return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
582 }
583
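/* Drain up to @budget descriptors from a ring shared for RX and TX
 * completions, dispatching each to xgene_enet_rx_frame() or
 * xgene_enet_tx_completion(), and return the number processed.
 */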
584 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
585                                    int budget)
586 {
587         struct net_device *ndev = ring->ndev;
588         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
589         struct xgene_enet_raw_desc *raw_desc, *exp_desc;
590         u16 head = ring->head;
591         u16 slots = ring->slots - 1;
592         int ret, desc_count, count = 0, processed = 0;
593         bool is_completion;
594
595         do {
596                 raw_desc = &ring->raw_desc[head];
597                 desc_count = 0;
598                 is_completion = false;
599                 exp_desc = NULL;
600                 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
601                         break;
602
603                 /* read fpqnum field after dataaddr field */
604                 dma_rmb();
605                 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
606                         head = (head + 1) & slots;
607                         exp_desc = &ring->raw_desc[head];
608
609                         if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
610                                 head = (head - 1) & slots;
611                                 break;
612                         }
613                         dma_rmb();
614                         count++;
615                         desc_count++;
616                 }
617                 if (is_rx_desc(raw_desc)) {
618                         ret = xgene_enet_rx_frame(ring, raw_desc);
619                 } else {
620                         ret = xgene_enet_tx_completion(ring, raw_desc);
621                         is_completion = true;
622                 }
623                 xgene_enet_mark_desc_slot_empty(raw_desc);
624                 if (exp_desc)
625                         xgene_enet_mark_desc_slot_empty(exp_desc);
626
627                 head = (head + 1) & slots;
628                 count++;
629                 desc_count++;
630                 processed++;
631                 if (is_completion)
632                         pdata->txc_level[ring->index] += desc_count;
633
634                 if (ret)
635                         break;
636         } while (--budget);
637
638         if (likely(count)) {
639                 pdata->ring_ops->wr_cmd(ring, -count);
640                 ring->head = head;
641
642                 if (__netif_subqueue_stopped(ndev, ring->index))
643                         netif_start_subqueue(ndev, ring->index);
644         }
645
646         return processed;
647 }
648
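/* NAPI poll handler: process up to @budget descriptors and, when the ring
 * is drained before the budget is exhausted, complete NAPI and re-enable
 * the ring's interrupt.
 */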
649 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
650 {
651         struct xgene_enet_desc_ring *ring;
652         int processed;
653
654         ring = container_of(napi, struct xgene_enet_desc_ring, napi);
655         processed = xgene_enet_process_ring(ring, budget);
656
657         if (processed != budget) {
658                 napi_complete(napi);
659                 enable_irq(ring->irq);
660         }
661
662         return processed;
663 }
664
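/* ndo_tx_timeout: reset the MAC and restart all transmit queues. */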
665 static void xgene_enet_timeout(struct net_device *ndev)
666 {
667         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
668         struct netdev_queue *txq;
669         int i;
670
671         pdata->mac_ops->reset(pdata);
672
673         for (i = 0; i < pdata->txq_cnt; i++) {
674                 txq = netdev_get_tx_queue(ndev, i);
675                 txq->trans_start = jiffies;
676                 netif_tx_start_queue(txq);
677         }
678 }
679
680 static void xgene_enet_set_irq_name(struct net_device *ndev)
681 {
682         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
683         struct xgene_enet_desc_ring *ring;
684         int i;
685
686         for (i = 0; i < pdata->rxq_cnt; i++) {
687                 ring = pdata->rx_ring[i];
688                 if (!pdata->cq_cnt) {
689                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
690                                  ndev->name);
691                 } else {
692                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
693                                  ndev->name, i);
694                 }
695         }
696
697         for (i = 0; i < pdata->cq_cnt; i++) {
698                 ring = pdata->tx_ring[i]->cp_ring;
699                 snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
700                          ndev->name, i);
701         }
702 }
703
704 static int xgene_enet_register_irq(struct net_device *ndev)
705 {
706         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
707         struct device *dev = ndev_to_dev(ndev);
708         struct xgene_enet_desc_ring *ring;
709         int ret = 0, i;
710
711         xgene_enet_set_irq_name(ndev);
712         for (i = 0; i < pdata->rxq_cnt; i++) {
713                 ring = pdata->rx_ring[i];
714                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
715                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
716                                        0, ring->irq_name, ring);
717                 if (ret) {
718                         netdev_err(ndev, "Failed to request irq %s\n",
719                                    ring->irq_name);
720                 }
721         }
722
723         for (i = 0; i < pdata->cq_cnt; i++) {
724                 ring = pdata->tx_ring[i]->cp_ring;
725                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
726                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
727                                        0, ring->irq_name, ring);
728                 if (ret) {
729                         netdev_err(ndev, "Failed to request irq %s\n",
730                                    ring->irq_name);
731                 }
732         }
733
734         return ret;
735 }
736
737 static void xgene_enet_free_irq(struct net_device *ndev)
738 {
739         struct xgene_enet_pdata *pdata;
740         struct xgene_enet_desc_ring *ring;
741         struct device *dev;
742         int i;
743
744         pdata = netdev_priv(ndev);
745         dev = ndev_to_dev(ndev);
746
747         for (i = 0; i < pdata->rxq_cnt; i++) {
748                 ring = pdata->rx_ring[i];
749                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
750                 devm_free_irq(dev, ring->irq, ring);
751         }
752
753         for (i = 0; i < pdata->cq_cnt; i++) {
754                 ring = pdata->tx_ring[i]->cp_ring;
755                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
756                 devm_free_irq(dev, ring->irq, ring);
757         }
758 }
759
760 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
761 {
762         struct napi_struct *napi;
763         int i;
764
765         for (i = 0; i < pdata->rxq_cnt; i++) {
766                 napi = &pdata->rx_ring[i]->napi;
767                 napi_enable(napi);
768         }
769
770         for (i = 0; i < pdata->cq_cnt; i++) {
771                 napi = &pdata->tx_ring[i]->cp_ring->napi;
772                 napi_enable(napi);
773         }
774 }
775
776 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
777 {
778         struct napi_struct *napi;
779         int i;
780
781         for (i = 0; i < pdata->rxq_cnt; i++) {
782                 napi = &pdata->rx_ring[i]->napi;
783                 napi_disable(napi);
784         }
785
786         for (i = 0; i < pdata->cq_cnt; i++) {
787                 napi = &pdata->tx_ring[i]->cp_ring->napi;
788                 napi_disable(napi);
789         }
790 }
791
792 static int xgene_enet_open(struct net_device *ndev)
793 {
794         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
795         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
796         int ret;
797
798         ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
799         if (ret)
800                 return ret;
801
802         ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
803         if (ret)
804                 return ret;
805
806         xgene_enet_napi_enable(pdata);
807         ret = xgene_enet_register_irq(ndev);
808         if (ret) {
809                 xgene_enet_napi_disable(pdata);
810                 return ret;
811         }
812
813         if (ndev->phydev) {
814                 phy_start(ndev->phydev);
815         } else {
816                 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
817                 netif_carrier_off(ndev);
818         }
819
820         mac_ops->tx_enable(pdata);
821         mac_ops->rx_enable(pdata);
822         netif_tx_start_all_queues(ndev);
823
824         return ret;
825 }
826
827 static int xgene_enet_close(struct net_device *ndev)
828 {
829         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
830         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
831         int i;
832
833         netif_tx_stop_all_queues(ndev);
834         mac_ops->tx_disable(pdata);
835         mac_ops->rx_disable(pdata);
836
837         if (ndev->phydev)
838                 phy_stop(ndev->phydev);
839         else
840                 cancel_delayed_work_sync(&pdata->link_work);
841
842         xgene_enet_free_irq(ndev);
843         xgene_enet_napi_disable(pdata);
844         for (i = 0; i < pdata->rxq_cnt; i++)
845                 xgene_enet_process_ring(pdata->rx_ring[i], -1);
846
847         return 0;
848 }
849 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
850 {
851         struct xgene_enet_pdata *pdata;
852         struct device *dev;
853
854         pdata = netdev_priv(ring->ndev);
855         dev = ndev_to_dev(ring->ndev);
856
857         pdata->ring_ops->clear(ring);
858         dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
859 }
860
861 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
862 {
863         struct xgene_enet_desc_ring *buf_pool;
864         struct xgene_enet_desc_ring *ring;
865         int i;
866
867         for (i = 0; i < pdata->txq_cnt; i++) {
868                 ring = pdata->tx_ring[i];
869                 if (ring) {
870                         xgene_enet_delete_ring(ring);
871                         pdata->port_ops->clear(pdata, ring);
872                         if (pdata->cq_cnt)
873                                 xgene_enet_delete_ring(ring->cp_ring);
874                         pdata->tx_ring[i] = NULL;
875                 }
876         }
877
878         for (i = 0; i < pdata->rxq_cnt; i++) {
879                 ring = pdata->rx_ring[i];
880                 if (ring) {
881                         buf_pool = ring->buf_pool;
882                         xgene_enet_delete_bufpool(buf_pool);
883                         xgene_enet_delete_ring(buf_pool);
884                         pdata->port_ops->clear(pdata, buf_pool);
885                         xgene_enet_delete_ring(ring);
886                         pdata->rx_ring[i] = NULL;
887                 }
888         }
889 }
890
891 static int xgene_enet_get_ring_size(struct device *dev,
892                                     enum xgene_enet_ring_cfgsize cfgsize)
893 {
894         int size = -EINVAL;
895
896         switch (cfgsize) {
897         case RING_CFGSIZE_512B:
898                 size = 0x200;
899                 break;
900         case RING_CFGSIZE_2KB:
901                 size = 0x800;
902                 break;
903         case RING_CFGSIZE_16KB:
904                 size = 0x4000;
905                 break;
906         case RING_CFGSIZE_64KB:
907                 size = 0x10000;
908                 break;
909         case RING_CFGSIZE_512KB:
910                 size = 0x80000;
911                 break;
912         default:
913                 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
914                 break;
915         }
916
917         return size;
918 }
919
920 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
921 {
922         struct xgene_enet_pdata *pdata;
923         struct device *dev;
924
925         if (!ring)
926                 return;
927
928         dev = ndev_to_dev(ring->ndev);
929         pdata = netdev_priv(ring->ndev);
930
931         if (ring->desc_addr) {
932                 pdata->ring_ops->clear(ring);
933                 dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
934         }
935         devm_kfree(dev, ring);
936 }
937
938 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
939 {
940         struct device *dev = &pdata->pdev->dev;
941         struct xgene_enet_desc_ring *ring;
942         int i;
943
944         for (i = 0; i < pdata->txq_cnt; i++) {
945                 ring = pdata->tx_ring[i];
946                 if (ring) {
947                         if (ring->cp_ring && ring->cp_ring->cp_skb)
948                                 devm_kfree(dev, ring->cp_ring->cp_skb);
949                         if (ring->cp_ring && pdata->cq_cnt)
950                                 xgene_enet_free_desc_ring(ring->cp_ring);
951                         xgene_enet_free_desc_ring(ring);
952                 }
953         }
954
955         for (i = 0; i < pdata->rxq_cnt; i++) {
956                 ring = pdata->rx_ring[i];
957                 if (ring) {
958                         if (ring->buf_pool) {
959                                 if (ring->buf_pool->rx_skb)
960                                         devm_kfree(dev, ring->buf_pool->rx_skb);
961                                 xgene_enet_free_desc_ring(ring->buf_pool);
962                         }
963                         xgene_enet_free_desc_ring(ring);
964                 }
965         }
966 }
967
968 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
969                                  struct xgene_enet_desc_ring *ring)
970 {
971         if ((pdata->enet_id == XGENE_ENET2) &&
972             (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
973                 return true;
974         }
975
976         return false;
977 }
978
979 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
980                                               struct xgene_enet_desc_ring *ring)
981 {
982         u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
983
984         return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
985 }
986
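/* Allocate one descriptor ring: the ring state, coherent descriptor
 * memory and, for ENET2 CPU-owned rings, the interrupt mailbox; the ring
 * is then programmed through ring_ops->setup().
 */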
987 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
988                         struct net_device *ndev, u32 ring_num,
989                         enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
990 {
991         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
992         struct device *dev = ndev_to_dev(ndev);
993         struct xgene_enet_desc_ring *ring;
994         void *irq_mbox_addr;
995         int size;
996
997         size = xgene_enet_get_ring_size(dev, cfgsize);
998         if (size < 0)
999                 return NULL;
1000
1001         ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
1002                             GFP_KERNEL);
1003         if (!ring)
1004                 return NULL;
1005
1006         ring->ndev = ndev;
1007         ring->num = ring_num;
1008         ring->cfgsize = cfgsize;
1009         ring->id = ring_id;
1010
1011         ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
1012                                               GFP_KERNEL | __GFP_ZERO);
1013         if (!ring->desc_addr) {
1014                 devm_kfree(dev, ring);
1015                 return NULL;
1016         }
1017         ring->size = size;
1018
1019         if (is_irq_mbox_required(pdata, ring)) {
1020                 irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
1021                                                     &ring->irq_mbox_dma,
1022                                                     GFP_KERNEL | __GFP_ZERO);
1023                 if (!irq_mbox_addr) {
1024                         dmam_free_coherent(dev, size, ring->desc_addr,
1025                                            ring->dma);
1026                         devm_kfree(dev, ring);
1027                         return NULL;
1028                 }
1029                 ring->irq_mbox_addr = irq_mbox_addr;
1030         }
1031
1032         ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
1033         ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
1034         ring = pdata->ring_ops->setup(ring);
1035         netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
1036                    ring->num, ring->size, ring->id, ring->slots);
1037
1038         return ring;
1039 }
1040
1041 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
1042 {
1043         return (owner << 6) | (bufnum & GENMASK(5, 0));
1044 }
1045
1046 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
1047 {
1048         enum xgene_ring_owner owner;
1049
1050         if (p->enet_id == XGENE_ENET1) {
1051                 switch (p->phy_mode) {
1052                 case PHY_INTERFACE_MODE_SGMII:
1053                         owner = RING_OWNER_ETH0;
1054                         break;
1055                 default:
1056                         owner = (!p->port_id) ? RING_OWNER_ETH0 :
1057                                                 RING_OWNER_ETH1;
1058                         break;
1059                 }
1060         } else {
1061                 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
1062         }
1063
1064         return owner;
1065 }
1066
1067 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
1068 {
1069         struct device *dev = &pdata->pdev->dev;
1070         u32 cpu_bufnum;
1071         int ret;
1072
1073         ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1074
1075         return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1076 }
1077
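/* Create all rings for the port: per RX queue an RX descriptor ring plus
 * a 2KB buffer pool, and per TX queue a TX ring, its expanded-buffer area
 * and a completion ring (the RX ring doubles as the completion ring when
 * cq_cnt is zero).
 */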
1078 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1079 {
1080         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1081         struct device *dev = ndev_to_dev(ndev);
1082         struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1083         struct xgene_enet_desc_ring *buf_pool = NULL;
1084         enum xgene_ring_owner owner;
1085         dma_addr_t dma_exp_bufs;
1086         u8 cpu_bufnum;
1087         u8 eth_bufnum = pdata->eth_bufnum;
1088         u8 bp_bufnum = pdata->bp_bufnum;
1089         u16 ring_num = pdata->ring_num;
1090         __le64 *exp_bufs;
1091         u16 ring_id;
1092         int i, ret, size;
1093
1094         cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1095
1096         for (i = 0; i < pdata->rxq_cnt; i++) {
1097                 /* allocate rx descriptor ring */
1098                 owner = xgene_derive_ring_owner(pdata);
1099                 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1100                 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1101                                                       RING_CFGSIZE_16KB,
1102                                                       ring_id);
1103                 if (!rx_ring) {
1104                         ret = -ENOMEM;
1105                         goto err;
1106                 }
1107
1108                 /* allocate buffer pool for receiving packets */
1109                 owner = xgene_derive_ring_owner(pdata);
1110                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1111                 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1112                                                        RING_CFGSIZE_2KB,
1113                                                        ring_id);
1114                 if (!buf_pool) {
1115                         ret = -ENOMEM;
1116                         goto err;
1117                 }
1118
1119                 rx_ring->nbufpool = NUM_BUFPOOL;
1120                 rx_ring->buf_pool = buf_pool;
1121                 rx_ring->irq = pdata->irqs[i];
1122                 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1123                                                 sizeof(struct sk_buff *),
1124                                                 GFP_KERNEL);
1125                 if (!buf_pool->rx_skb) {
1126                         ret = -ENOMEM;
1127                         goto err;
1128                 }
1129
1130                 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1131                 rx_ring->buf_pool = buf_pool;
1132                 pdata->rx_ring[i] = rx_ring;
1133         }
1134
1135         for (i = 0; i < pdata->txq_cnt; i++) {
1136                 /* allocate tx descriptor ring */
1137                 owner = xgene_derive_ring_owner(pdata);
1138                 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1139                 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1140                                                       RING_CFGSIZE_16KB,
1141                                                       ring_id);
1142                 if (!tx_ring) {
1143                         ret = -ENOMEM;
1144                         goto err;
1145                 }
1146
1147                 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1148                 exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1149                                                GFP_KERNEL | __GFP_ZERO);
1150                 if (!exp_bufs) {
1151                         ret = -ENOMEM;
1152                         goto err;
1153                 }
1154                 tx_ring->exp_bufs = exp_bufs;
1155
1156                 pdata->tx_ring[i] = tx_ring;
1157
1158                 if (!pdata->cq_cnt) {
1159                         cp_ring = pdata->rx_ring[i];
1160                 } else {
1161                         /* allocate tx completion descriptor ring */
1162                         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1163                                                          cpu_bufnum++);
1164                         cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1165                                                               RING_CFGSIZE_16KB,
1166                                                               ring_id);
1167                         if (!cp_ring) {
1168                                 ret = -ENOMEM;
1169                                 goto err;
1170                         }
1171
1172                         cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1173                         cp_ring->index = i;
1174                 }
1175
1176                 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1177                                                sizeof(struct sk_buff *),
1178                                                GFP_KERNEL);
1179                 if (!cp_ring->cp_skb) {
1180                         ret = -ENOMEM;
1181                         goto err;
1182                 }
1183
1184                 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1185                 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1186                                                       size, GFP_KERNEL);
1187                 if (!cp_ring->frag_dma_addr) {
1188                         devm_kfree(dev, cp_ring->cp_skb);
1189                         ret = -ENOMEM;
1190                         goto err;
1191                 }
1192
1193                 tx_ring->cp_ring = cp_ring;
1194                 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1195         }
1196
1197         if (pdata->ring_ops->coalesce)
1198                 pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1199         pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1200
1201         return 0;
1202
1203 err:
1204         xgene_enet_free_desc_rings(pdata);
1205         return ret;
1206 }
1207
1208 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1209                         struct net_device *ndev,
1210                         struct rtnl_link_stats64 *storage)
1211 {
1212         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1213         struct rtnl_link_stats64 *stats = &pdata->stats;
1214         struct xgene_enet_desc_ring *ring;
1215         int i;
1216
1217         memset(stats, 0, sizeof(struct rtnl_link_stats64));
1218         for (i = 0; i < pdata->txq_cnt; i++) {
1219                 ring = pdata->tx_ring[i];
1220                 if (ring) {
1221                         stats->tx_packets += ring->tx_packets;
1222                         stats->tx_bytes += ring->tx_bytes;
1223                 }
1224         }
1225
1226         for (i = 0; i < pdata->rxq_cnt; i++) {
1227                 ring = pdata->rx_ring[i];
1228                 if (ring) {
1229                         stats->rx_packets += ring->rx_packets;
1230                         stats->rx_bytes += ring->rx_bytes;
1231                         stats->rx_errors += ring->rx_length_errors +
1232                                 ring->rx_crc_errors +
1233                                 ring->rx_frame_errors +
1234                                 ring->rx_fifo_errors;
1235                         stats->rx_dropped += ring->rx_dropped;
1236                 }
1237         }
1238         memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
1239
1240         return storage;
1241 }
1242
1243 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1244 {
1245         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1246         int ret;
1247
1248         ret = eth_mac_addr(ndev, addr);
1249         if (ret)
1250                 return ret;
1251         pdata->mac_ops->set_mac_addr(pdata);
1252
1253         return ret;
1254 }
1255
1256 static const struct net_device_ops xgene_ndev_ops = {
1257         .ndo_open = xgene_enet_open,
1258         .ndo_stop = xgene_enet_close,
1259         .ndo_start_xmit = xgene_enet_start_xmit,
1260         .ndo_tx_timeout = xgene_enet_timeout,
1261         .ndo_get_stats64 = xgene_enet_get_stats64,
1262         .ndo_change_mtu = eth_change_mtu,
1263         .ndo_set_mac_address = xgene_enet_set_mac_address,
1264 };
1265
1266 #ifdef CONFIG_ACPI
1267 static void xgene_get_port_id_acpi(struct device *dev,
1268                                   struct xgene_enet_pdata *pdata)
1269 {
1270         acpi_status status;
1271         u64 temp;
1272
1273         status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1274         if (ACPI_FAILURE(status)) {
1275                 pdata->port_id = 0;
1276         } else {
1277                 pdata->port_id = temp;
1278         }
1279
1280         return;
1281 }
1282 #endif
1283
1284 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1285 {
1286         u32 id = 0;
1287
1288         of_property_read_u32(dev->of_node, "port-id", &id);
1289
1290         pdata->port_id = id & BIT(0);
1291
1292         return;
1293 }
1294
1295 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1296 {
1297         struct device *dev = &pdata->pdev->dev;
1298         int delay, ret;
1299
1300         ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1301         if (ret) {
1302                 pdata->tx_delay = 4;
1303                 return 0;
1304         }
1305
1306         if (delay < 0 || delay > 7) {
1307                 dev_err(dev, "Invalid tx-delay specified\n");
1308                 return -EINVAL;
1309         }
1310
1311         pdata->tx_delay = delay;
1312
1313         return 0;
1314 }
1315
1316 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1317 {
1318         struct device *dev = &pdata->pdev->dev;
1319         int delay, ret;
1320
1321         ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1322         if (ret) {
1323                 pdata->rx_delay = 2;
1324                 return 0;
1325         }
1326
1327         if (delay < 0 || delay > 7) {
1328                 dev_err(dev, "Invalid rx-delay specified\n");
1329                 return -EINVAL;
1330         }
1331
1332         pdata->rx_delay = delay;
1333
1334         return 0;
1335 }
1336
1337 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1338 {
1339         struct platform_device *pdev = pdata->pdev;
1340         struct device *dev = &pdev->dev;
1341         int i, ret, max_irqs;
1342
1343         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1344                 max_irqs = 1;
1345         else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1346                 max_irqs = 2;
1347         else
1348                 max_irqs = XGENE_MAX_ENET_IRQ;
1349
1350         for (i = 0; i < max_irqs; i++) {
1351                 ret = platform_get_irq(pdev, i);
1352                 if (ret <= 0) {
1353                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1354                                 max_irqs = i;
1355                                 pdata->rxq_cnt = max_irqs / 2;
1356                                 pdata->txq_cnt = max_irqs / 2;
1357                                 pdata->cq_cnt = max_irqs / 2;
1358                                 break;
1359                         }
1360                         dev_err(dev, "Unable to get ENET IRQ\n");
1361                         ret = ret ? : -ENXIO;
1362                         return ret;
1363                 }
1364                 pdata->irqs[i] = ret;
1365         }
1366
1367         return 0;
1368 }
1369
1370 static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1371 {
1372         int ret;
1373
1374         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1375                 return 0;
1376
1377         if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1378                 return 0;
1379
1380         ret = xgene_enet_phy_connect(pdata->ndev);
1381         if (!ret)
1382                 pdata->mdio_driver = true;
1383
1384         return 0;
1385 }
1386
1387 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1388 {
1389         struct device *dev = &pdata->pdev->dev;
1390
1391         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1392                 return;
1393
1394         pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1395         if (IS_ERR(pdata->sfp_rdy))
1396                 pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1397 }
1398
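/* Gather platform resources: map the ENET CSR, ring CSR and ring command
 * regions, then read the port id, MAC address, PHY mode, TX/RX delays and
 * IRQs, and compute the per-block CSR addresses.
 */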
1399 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1400 {
1401         struct platform_device *pdev;
1402         struct net_device *ndev;
1403         struct device *dev;
1404         struct resource *res;
1405         void __iomem *base_addr;
1406         u32 offset;
1407         int ret = 0;
1408
1409         pdev = pdata->pdev;
1410         dev = &pdev->dev;
1411         ndev = pdata->ndev;
1412
1413         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1414         if (!res) {
1415                 dev_err(dev, "Resource enet_csr not defined\n");
1416                 return -ENODEV;
1417         }
1418         pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1419         if (!pdata->base_addr) {
1420                 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1421                 return -ENOMEM;
1422         }
1423
1424         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1425         if (!res) {
1426                 dev_err(dev, "Resource ring_csr not defined\n");
1427                 return -ENODEV;
1428         }
1429         pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1430                                                         resource_size(res));
1431         if (!pdata->ring_csr_addr) {
1432                 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1433                 return -ENOMEM;
1434         }
1435
1436         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1437         if (!res) {
1438                 dev_err(dev, "Resource ring_cmd not defined\n");
1439                 return -ENODEV;
1440         }
1441         pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1442                                                         resource_size(res));
1443         if (!pdata->ring_cmd_addr) {
1444                 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1445                 return -ENOMEM;
1446         }
1447
1448         if (dev->of_node)
1449                 xgene_get_port_id_dt(dev, pdata);
1450 #ifdef CONFIG_ACPI
1451         else
1452                 xgene_get_port_id_acpi(dev, pdata);
1453 #endif
1454
1455         if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1456                 eth_hw_addr_random(ndev);
1457
1458         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1459
1460         pdata->phy_mode = device_get_phy_mode(dev);
1461         if (pdata->phy_mode < 0) {
1462                 dev_err(dev, "Unable to get phy-connection-type\n");
1463                 return pdata->phy_mode;
1464         }
1465         if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1466             pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1467             pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1468                 dev_err(dev, "Incorrect phy-connection-type specified\n");
1469                 return -ENODEV;
1470         }
1471
1472         ret = xgene_get_tx_delay(pdata);
1473         if (ret)
1474                 return ret;
1475
1476         ret = xgene_get_rx_delay(pdata);
1477         if (ret)
1478                 return ret;
1479
1480         ret = xgene_enet_get_irqs(pdata);
1481         if (ret)
1482                 return ret;
1483
1484         ret = xgene_enet_check_phy_handle(pdata);
1485         if (ret)
1486                 return ret;
1487
1488         xgene_enet_gpiod_get(pdata);
1489
1490         pdata->clk = devm_clk_get(&pdev->dev, NULL);
1491         if (IS_ERR(pdata->clk)) {
1492                 /* Firmware may have set up the clock already. */
1493                 dev_info(dev, "clocks have been setup already\n");
1494         }
1495
1496         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1497                 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1498         else
1499                 base_addr = pdata->base_addr;
1500         pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1501         pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1502         pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1503         pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1504         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
1505             pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1506                 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1507                 offset = (pdata->enet_id == XGENE_ENET1) ?
1508                           BLOCK_ETH_MAC_CSR_OFFSET :
1509                           X2_BLOCK_ETH_MAC_CSR_OFFSET;
1510                 pdata->mcx_mac_csr_addr = base_addr + offset;
1511         } else {
1512                 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1513                 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1514                 pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
1515         }
1516         pdata->rx_buff_cnt = NUM_PKT_BUF;
1517
1518         return 0;
1519 }
1520
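/* Reset the port, create the descriptor rings, fill the Rx buffer pools and
 * set up either the classifier pre-parser tree (XGMII) or a static CLE bypass
 * before initializing the MAC.
 */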
1521 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1522 {
1523         struct xgene_enet_cle *enet_cle = &pdata->cle;
1524         struct net_device *ndev = pdata->ndev;
1525         struct xgene_enet_desc_ring *buf_pool;
1526         u16 dst_ring_num;
1527         int i, ret;
1528
1529         ret = pdata->port_ops->reset(pdata);
1530         if (ret)
1531                 return ret;
1532
1533         ret = xgene_enet_create_desc_rings(ndev);
1534         if (ret) {
1535                 netdev_err(ndev, "Error in ring configuration\n");
1536                 return ret;
1537         }
1538
1539         /* setup buffer pool */
1540         for (i = 0; i < pdata->rxq_cnt; i++) {
1541                 buf_pool = pdata->rx_ring[i]->buf_pool;
1542                 xgene_enet_init_bufpool(buf_pool);
1543                 ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
1544                 if (ret)
1545                         goto err;
1546         }
1547
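        /* The 10G port classifies Rx traffic through the CLE pre-parser tree;
         * the other ports bypass the classifier straight to the first Rx ring
         * and its buffer pool.
         */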
1548         dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1549         buf_pool = pdata->rx_ring[0]->buf_pool;
1550         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1551                 /* Initialize and enable the pre-classifier tree */
1552                 enet_cle->max_nodes = 512;
1553                 enet_cle->max_dbptrs = 1024;
1554                 enet_cle->parsers = 3;
1555                 enet_cle->active_parser = PARSER_ALL;
1556                 enet_cle->ptree.start_node = 0;
1557                 enet_cle->ptree.start_dbptr = 0;
1558                 enet_cle->jump_bytes = 8;
1559                 ret = pdata->cle_ops->cle_init(pdata);
1560                 if (ret) {
1561                         netdev_err(ndev, "Preclass Tree init error\n");
1562                         goto err;
1563                 }
1564         } else {
1565                 pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
1566         }
1567
1568         pdata->phy_speed = SPEED_UNKNOWN;
1569         pdata->mac_ops->init(pdata);
1570
1571         return ret;
1572
1573 err:
1574         xgene_enet_delete_desc_rings(pdata);
1575         return ret;
1576 }
1577
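/* Select the MAC, port, classifier and ring operations, the queue counts and
 * the per-port buffer/ring numbering based on PHY mode, ENET generation and
 * port id.
 */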
1578 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1579 {
1580         switch (pdata->phy_mode) {
1581         case PHY_INTERFACE_MODE_RGMII:
1582                 pdata->mac_ops = &xgene_gmac_ops;
1583                 pdata->port_ops = &xgene_gport_ops;
1584                 pdata->rm = RM3;
1585                 pdata->rxq_cnt = 1;
1586                 pdata->txq_cnt = 1;
1587                 pdata->cq_cnt = 0;
1588                 break;
1589         case PHY_INTERFACE_MODE_SGMII:
1590                 pdata->mac_ops = &xgene_sgmac_ops;
1591                 pdata->port_ops = &xgene_sgport_ops;
1592                 pdata->rm = RM1;
1593                 pdata->rxq_cnt = 1;
1594                 pdata->txq_cnt = 1;
1595                 pdata->cq_cnt = 1;
1596                 break;
1597         default:
1598                 pdata->mac_ops = &xgene_xgmac_ops;
1599                 pdata->port_ops = &xgene_xgport_ops;
1600                 pdata->cle_ops = &xgene_cle3in_ops;
1601                 pdata->rm = RM0;
1602                 if (!pdata->rxq_cnt) {
1603                         pdata->rxq_cnt = XGENE_NUM_RX_RING;
1604                         pdata->txq_cnt = XGENE_NUM_TX_RING;
1605                         pdata->cq_cnt = XGENE_NUM_TXC_RING;
1606                 }
1607                 break;
1608         }
1609
1610         if (pdata->enet_id == XGENE_ENET1) {
1611                 switch (pdata->port_id) {
1612                 case 0:
1613                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1614                                 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1615                                 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1616                                 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1617                                 pdata->ring_num = START_RING_NUM_0;
1618                         } else {
1619                                 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1620                                 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1621                                 pdata->bp_bufnum = START_BP_BUFNUM_0;
1622                                 pdata->ring_num = START_RING_NUM_0;
1623                         }
1624                         break;
1625                 case 1:
1626                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1627                                 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1628                                 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1629                                 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1630                                 pdata->ring_num = XG_START_RING_NUM_1;
1631                         } else {
1632                                 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1633                                 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1634                                 pdata->bp_bufnum = START_BP_BUFNUM_1;
1635                                 pdata->ring_num = START_RING_NUM_1;
1636                         }
1637                         break;
1638                 default:
1639                         break;
1640                 }
1641                 pdata->ring_ops = &xgene_ring1_ops;
1642         } else {
1643                 switch (pdata->port_id) {
1644                 case 0:
1645                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1646                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1647                         pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1648                         pdata->ring_num = X2_START_RING_NUM_0;
1649                         break;
1650                 case 1:
1651                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1652                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1653                         pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1654                         pdata->ring_num = X2_START_RING_NUM_1;
1655                         break;
1656                 default:
1657                         break;
1658                 }
1659                 pdata->rm = RM0;
1660                 pdata->ring_ops = &xgene_ring2_ops;
1661         }
1662 }
1663
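/* Register one NAPI context per Rx ring and one per Tx completion ring. */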
1664 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1665 {
1666         struct napi_struct *napi;
1667         int i;
1668
1669         for (i = 0; i < pdata->rxq_cnt; i++) {
1670                 napi = &pdata->rx_ring[i]->napi;
1671                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1672                                NAPI_POLL_WEIGHT);
1673         }
1674
1675         for (i = 0; i < pdata->cq_cnt; i++) {
1676                 napi = &pdata->tx_ring[i]->cp_ring->napi;
1677                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1678                                NAPI_POLL_WEIGHT);
1679         }
1680 }
1681
1682 #ifdef CONFIG_ACPI
1683 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1684         { "APMC0D05", XGENE_ENET1},
1685         { "APMC0D30", XGENE_ENET1},
1686         { "APMC0D31", XGENE_ENET1},
1687         { "APMC0D3F", XGENE_ENET1},
1688         { "APMC0D26", XGENE_ENET2},
1689         { "APMC0D25", XGENE_ENET2},
1690         { }
1691 };
1692 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1693 #endif
1694
1695 static const struct of_device_id xgene_enet_of_match[] = {
1696         {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
1697         {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
1698         {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
1699         {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
1700         {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
1701         {},
1702 };
1703
1704 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
1705
1706 static int xgene_enet_probe(struct platform_device *pdev)
1707 {
1708         struct net_device *ndev;
1709         struct xgene_enet_pdata *pdata;
1710         struct device *dev = &pdev->dev;
1711         void (*link_state)(struct work_struct *);
1712         const struct of_device_id *of_id;
1713         int ret;
1714
1715         ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
1716                                   XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
1717         if (!ndev)
1718                 return -ENOMEM;
1719
1720         pdata = netdev_priv(ndev);
1721
1722         pdata->pdev = pdev;
1723         pdata->ndev = ndev;
1724         SET_NETDEV_DEV(ndev, dev);
1725         platform_set_drvdata(pdev, pdata);
1726         ndev->netdev_ops = &xgene_ndev_ops;
1727         xgene_enet_set_ethtool_ops(ndev);
1728         ndev->features |= NETIF_F_IP_CSUM |
1729                           NETIF_F_GSO |
1730                           NETIF_F_GRO |
1731                           NETIF_F_SG;
1732
1733         of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
1734         if (of_id) {
1735                 pdata->enet_id = (enum xgene_enet_id)of_id->data;
1736         }
1737 #ifdef CONFIG_ACPI
1738         else {
1739                 const struct acpi_device_id *acpi_id;
1740
1741                 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
1742                 if (acpi_id)
1743                         pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
1744         }
1745 #endif
1746         if (!pdata->enet_id) {
1747                 ret = -ENODEV;
1748                 goto err;
1749         }
1750
1751         ret = xgene_enet_get_resources(pdata);
1752         if (ret)
1753                 goto err;
1754
1755         xgene_enet_setup_ops(pdata);
1756
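        /* TSO and Rx checksum offload are only enabled on the XGMII port */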
1757         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1758                 ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
1759                 spin_lock_init(&pdata->mss_lock);
1760         }
1761         ndev->hw_features = ndev->features;
1762
1763         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1764         if (ret) {
1765                 netdev_err(ndev, "No usable DMA configuration\n");
1766                 goto err;
1767         }
1768
1769         ret = xgene_enet_init_hw(pdata);
1770         if (ret)
1771                 goto err;
1772
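        /* XGMII polls link state from a delayed work item.  For the other
         * modes, unless an external MDIO driver manages the PHY, RGMII sets
         * up the internal MDIO bus and SGMII also falls back to polling.
         */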
1773         link_state = pdata->mac_ops->link_state;
1774         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1775                 INIT_DELAYED_WORK(&pdata->link_work, link_state);
1776         } else if (!pdata->mdio_driver) {
1777                 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1778                         ret = xgene_enet_mdio_config(pdata);
1779                 else
1780                         INIT_DELAYED_WORK(&pdata->link_work, link_state);
1781
1782                 if (ret)
1783                         goto err1;
1784         }
1785
1786         xgene_enet_napi_add(pdata);
1787         ret = register_netdev(ndev);
1788         if (ret) {
1789                 netdev_err(ndev, "Failed to register netdev\n");
1790                 goto err2;
1791         }
1792
1793         return 0;
1794
1795 err2:
1796         /*
1797          * If necessary, free_netdev() will call netif_napi_del() and undo
1798          * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
1799          */
1800
1801         if (pdata->mdio_driver)
1802                 xgene_enet_phy_disconnect(pdata);
1803         else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1804                 xgene_enet_mdio_remove(pdata);
1805 err1:
1806         xgene_enet_delete_desc_rings(pdata);
1807 err:
1808         free_netdev(ndev);
1809         return ret;
1810 }
1811
1812 static int xgene_enet_remove(struct platform_device *pdev)
1813 {
1814         struct xgene_enet_pdata *pdata;
1815         struct net_device *ndev;
1816
1817         pdata = platform_get_drvdata(pdev);
1818         ndev = pdata->ndev;
1819
1820         rtnl_lock();
1821         if (netif_running(ndev))
1822                 dev_close(ndev);
1823         rtnl_unlock();
1824
1825         if (pdata->mdio_driver)
1826                 xgene_enet_phy_disconnect(pdata);
1827         else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1828                 xgene_enet_mdio_remove(pdata);
1829
1830         unregister_netdev(ndev);
1831         pdata->port_ops->shutdown(pdata);
1832         xgene_enet_delete_desc_rings(pdata);
1833         free_netdev(ndev);
1834
1835         return 0;
1836 }
1837
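/* .shutdown can be invoked even if probe never completed; do nothing unless
 * the private data and netdev were fully set up.
 */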
1838 static void xgene_enet_shutdown(struct platform_device *pdev)
1839 {
1840         struct xgene_enet_pdata *pdata;
1841
1842         pdata = platform_get_drvdata(pdev);
1843         if (!pdata)
1844                 return;
1845
1846         if (!pdata->ndev)
1847                 return;
1848
1849         xgene_enet_remove(pdev);
1850 }
1851
1852 static struct platform_driver xgene_enet_driver = {
1853         .driver = {
1854                    .name = "xgene-enet",
1855                    .of_match_table = of_match_ptr(xgene_enet_of_match),
1856                    .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
1857         },
1858         .probe = xgene_enet_probe,
1859         .remove = xgene_enet_remove,
1860         .shutdown = xgene_enet_shutdown,
1861 };
1862
1863 module_platform_driver(xgene_enet_driver);
1864
1865 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
1866 MODULE_VERSION(XGENE_DRV_VERSION);
1867 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
1868 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
1869 MODULE_LICENSE("GPL");