// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
static atomic_t oct_rx_ready = ATOMIC_INIT(0);
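/*
 * oct_rx_ready is non-zero once cvm_oct_rx_initialize() has finished
 * bringing the receive groups up; cvm_oct_poll_controller() checks it
 * so that netpoll cannot touch the groups before they exist.
 */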
static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];
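/*
 * One oct_rx_group (IRQ plus NAPI context) exists per POW receive
 * group; only the groups enabled in the pow_receive_groups mask are
 * ever registered or polled.
 */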
/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}
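/*
 * The IRQ stays disabled here; cvm_oct_napi_poll() re-enables it only
 * when a poll round finishes under budget, which is the usual NAPI
 * interrupt-mitigation pattern.
 */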
/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;
	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
			cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
				cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}
			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
140 printk_ratelimited("Port %d unknown preamble, packet dropped\n",
142 cvm_oct_free_work(work);
147 printk_ratelimited("Port %d receive error code %d, packet dropped\n",
148 port, work->word2.snoip.err_code);
149 cvm_oct_free_work(work);
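/*
 * Illustration of the nibble shift above (hypothetical bytes, not taken
 * from the hardware documentation): if the frame should begin with
 * 0x12 0x34 but reception slipped by one nibble, memory holds
 * 0x2d 0x41 ... with the low nibble 0xd of the first byte being the
 * SFD remnant. The loop then rebuilds byte 0 as
 * ((0x2d & 0xf0) >> 4) | ((0x41 & 0xf) << 4) = 0x12, byte 1 from the
 * high nibble of 0x41 and the low nibble of the byte after it, and so
 * on down the frame.
 */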
static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
{
	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
	int len = work->word1.len;
	int segment_size;

	while (segments--) {
		union cvmx_buf_ptr next_ptr;
		next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		/*
		 * Octeon Errata PKI-100: The segment size is wrong.
		 *
		 * Until it is fixed, calculate the segment size based on
		 * the packet pool buffer size.
		 * When it is fixed, the following line should be replaced
		 * with this one:
		 * int segment_size = segment_ptr.s.size;
		 */
		segment_size =
			CVMX_FPA_PACKET_POOL_SIZE -
			(segment_ptr.s.addr -
			 (((segment_ptr.s.addr >> 7) -
			   segment_ptr.s.back) << 7));
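		/*
		 * Reading the expression above: segment_ptr.s.back appears
		 * to count 128-byte cache lines from the buffer start back
		 * to the data address, so ((addr >> 7) - back) << 7
		 * recovers the buffer's base address; subtracting the data
		 * offset from the pool buffer size gives the bytes that
		 * can still live in this segment.
		 */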
		/* Don't copy more than what is left in the packet */
		if (segment_size > len)
			segment_size = len;

		/* Copy the data into the packet */
		skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
			     segment_size);
		len -= segment_size;
		segment_ptr = next_ptr;
	}
}
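/**
 * cvm_oct_poll - process received packets for one receive group.
 * @rx_group: The group to poll.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */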
static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);
	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}
	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}
	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}
	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;
		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);
		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}
		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}
			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				skb_put_data(skb, ptr, work->word1.len);
				/* No packet buffers to free */
			} else {
				copy_segments_to_skb(work, skb);
			}
			packet_not_copied = 0;
		}
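		/*
		 * From here on, packet_not_copied records whether the skb
		 * still wraps the hardware buffer (zero-copy) or owns a
		 * private copy; the end of the loop uses it to choose
		 * between recycling the work entry and freeing it.
		 */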
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;
				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device that
				 * isn't up.
				 */
				dev->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
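	/*
	 * Budget exhausted or the POW ran dry: restore per-core state and
	 * let cvm_oct_rx_refill_pool() below replace the skbuffs consumed
	 * by the zero-copy path.
	 */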
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}
/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete_done(napi, rx_count);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets
 * (dev->poll_controller)
 *
 * @dev:    Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");
	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);
		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);
		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}
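		/*
		 * With tc_thr = 1 the threshold interrupt fires as soon as
		 * a single work-queue entry is pending for this group;
		 * pc_thr appears to pace the periodic interrupt check (the
		 * exact units are in the SSO/POW register descriptions).
		 */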
		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}

	atomic_inc(&oct_rx_ready);
}
void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the interrupt handler */
		free_irq(oct_rx_group[i].irq, cvm_oct_device);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}