drivers/staging/octeon/ethernet-rx.c (GNU Linux-libre 4.19.264-gnu1)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

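/* Set once cvm_oct_rx_initialize() has completed; cvm_oct_poll_controller()
 * uses it to avoid polling groups that have not been set up yet.
 */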
static atomic_t oct_rx_ready = ATOMIC_INIT(0);

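/*
 * Per-group receive state.  Each enabled POW/SSO group gets its own IRQ
 * (OCTEON_IRQ_WORKQ0 + group) and NAPI instance; the sixteen entries
 * cover the sixteen possible work groups.
 */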
static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
		return 0;

	if (work->word2.snoip.err_code == 5 ||
	    work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
				return 0;
			}

			if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
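				/*
				 * The payload is offset by half a byte, so
				 * rebuild each byte from the high nibble of
				 * the current byte and the low nibble of the
				 * next one to realign the frame in place.
				 */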
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
				return 0;
			}

			printk_ratelimited("Port %d unknown preamble, packet dropped\n",
					   port);
			cvm_oct_free_work(work);
			return 1;
		}
	}

	printk_ratelimited("Port %d receive error code %d, packet dropped\n",
			   port, work->word2.snoip.err_code);
	cvm_oct_free_work(work);
	return 1;
}

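/*
 * copy_segments_to_skb - copy a chained multi-buffer packet into an skb.
 *
 * The hardware stores a cvmx_buf_ptr to the next segment eight bytes
 * before each segment's data.  Walk that chain and append each segment
 * to the skb until the full packet length has been copied.
 */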
static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
{
	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
	int len = work->word1.len;
	int segment_size;

	while (segments--) {
		union cvmx_buf_ptr next_ptr;

		next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		/*
		 * Octeon Errata PKI-100: The segment size is wrong.
		 *
		 * Until it is fixed, calculate the segment size based on
		 * the packet pool buffer size.
		 * When it is fixed, the following line should be replaced
		 * with this one:
		 * int segment_size = segment_ptr.s.size;
		 */
		segment_size =
			CVMX_FPA_PACKET_POOL_SIZE -
			(segment_ptr.s.addr -
			 (((segment_ptr.s.addr >> 7) -
			   segment_ptr.s.back) << 7));

		/* Don't copy more than what is left in the packet */
		if (segment_size > len)
			segment_size = len;

		/* Copy the data into the packet */
		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
			     segment_size);
		len -= segment_size;
		segment_ptr = next_ptr;
	}
}

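/*
 * cvm_oct_poll - receive packets from one POW/SSO group.
 * @rx_group: Receive group to poll.
 * @budget: Maximum number of packets to process.
 *
 * Temporarily restricts this core's group mask to @rx_group, pulls work
 * queue entries until the budget is exhausted or no work remains, and
 * passes the resulting skbs to the network stack.
 *
 * Returns the number of packets processed.
 */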
static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int	coreid = cvmx_get_core_num();
	u64	old_group_mask;
	u64	old_scratch;
	int		rx_count = 0;
	int		did_work_request = 0;
	int		packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
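		/*
		 * No work was returned.  Before stopping, write this group's
		 * bit to the work queue interrupt registers; on this hardware
		 * that acknowledges the pending interrupt and re-enables the
		 * input queue so new packets will raise an interrupt again.
		 */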
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
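		/*
		 * For buffers that came from our FPA pool, the owning skb
		 * pointer is stored in the word immediately before the packet
		 * data (set up when the pool was filled); fetch it here for
		 * the zero-copy path below.
		 */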
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				skb_put_data(skb, ptr, work->word1.len);
				/* No packet buffers to free */
			} else {
				copy_segments_to_skb(work, skb);
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device that
				 * isn't up.
				 */
				dev->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
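	/* Top up the pool of receive buffers to replace those consumed above. */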
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete_done(napi, rx_count);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll all receive groups for packets.
 *
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif

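/*
 * cvm_oct_rx_initialize - set up the receive groups.
 *
 * For every group enabled in pow_receive_groups this registers a NAPI
 * instance and an IRQ handler, programs the work queue interrupt
 * thresholds, and schedules NAPI once so packets that arrived before
 * this point are drained.
 */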
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
	atomic_inc(&oct_rx_ready);
}

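/*
 * cvm_oct_rx_shutdown - undo cvm_oct_rx_initialize().
 *
 * Disables the work queue interrupts and releases the IRQs and NAPI
 * instances for every enabled receive group.
 */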
void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the interrupt handler; the dev_id cookie must match
		 * the one passed to request_irq() above.
		 */
		free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}