/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

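/* 5-tuple describing a steered flow; IPv4 and IPv6 addresses share the
 * same node via the unions below.
 */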
struct qede_arfs_tuple {
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16  src_port;
        __be16  dst_port;
        __be16  eth_proto;
        u8      ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID  0
        unsigned long state;

        /* pointer to aRFS packet buffer */
        void *data;

        /* dma map address of aRFS packet buffer */
        dma_addr_t mapping;

        /* length of aRFS packet buffer */
        int buf_len;

        /* tuple extracted from the aRFS packet buffer */
        struct qede_arfs_tuple tuple;

        u32 flow_id;
        u16 sw_id;
        u16 rxq_id;
        u16 next_rxq_id;
        bool filter_op;
        bool used;
        u8 fw_rc;
        struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
#define QEDE_ARFS_POLL_COUNT    100
#define QEDE_RFS_FLW_BITSHIFT   (4)
#define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
        struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

        /* lock for filter list access */
        spinlock_t              arfs_list_lock;
        unsigned long           *arfs_fltr_bmap;
        int                     filter_count;
        bool                    enable;
};

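/* Ask qed to add or delete the n-tuple filter described by @n on queue
 * @rxq_id. The request is asynchronous; qede_arfs_filter_op() runs on
 * completion, and n->used guards against issuing a second request while
 * one is still in flight.
 */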
static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     struct qede_arfs_fltr_node *n,
                                     u16 rxq_id, bool add_fltr)
{
        const struct qed_eth_ops *op = edev->ops;

        if (n->used)
                return;

        DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                   add_fltr ? "Adding" : "Deleting",
                   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
                   ntohs(n->tuple.dst_port), rxq_id);

        n->used = true;
        n->filter_op = add_fltr;
        op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
                                 rxq_id, add_fltr);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
        kfree(fltr->data);
        clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
        kfree(fltr);
}

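/* Map the filter's packet buffer for DMA and link the node into its hash
 * bucket. The hardware aRFS searcher is switched on when the first filter
 * is added, and off again by the dequeue path below when the last one is
 * removed. On a DMA mapping failure the node is freed here.
 */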
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *fltr,
                                      u16 bucket_idx)
{
        fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
                                       fltr->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
                qede_free_arfs_filter(edev, fltr);
                return -ENOMEM;
        }

        INIT_HLIST_NODE(&fltr->node);
        hlist_add_head(&fltr->node,
                       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
        edev->arfs->filter_count++;

        if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
                edev->ops->configure_arfs_searcher(edev->cdev, true);
                edev->arfs->enable = true;
        }

        return 0;
}

static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *fltr)
{
        hlist_del(&fltr->node);
        dma_unmap_single(&edev->pdev->dev, fltr->mapping,
                         fltr->buf_len, DMA_TO_DEVICE);

        qede_free_arfs_filter(edev, fltr);
        edev->arfs->filter_count--;

        if (!edev->arfs->filter_count && edev->arfs->enable) {
                edev->arfs->enable = false;
                edev->ops->configure_arfs_searcher(edev->cdev, false);
        }
}

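/* Completion callback invoked by qed once a qede_configure_arfs_fltr()
 * request finishes. On failure the filter is marked invalid and left for
 * qede_process_arfs_filters() to reap. On success, a completed add marks
 * the filter valid, and a pending queue change (rxq_id != next_rxq_id)
 * triggers the delete/re-add pair that moves the flow to its new queue.
 */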
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
        struct qede_arfs_fltr_node *fltr = filter;
        struct qede_dev *edev = dev;

        fltr->fw_rc = fw_rc;

        if (fw_rc) {
                DP_NOTICE(edev,
                          "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                          fw_rc, fltr->flow_id, fltr->sw_id,
                          ntohs(fltr->tuple.src_port),
                          ntohs(fltr->tuple.dst_port), fltr->rxq_id);

                spin_lock_bh(&edev->arfs->arfs_list_lock);

                fltr->used = false;
                clear_bit(QEDE_FLTR_VALID, &fltr->state);

                spin_unlock_bh(&edev->arfs->arfs_list_lock);
                return;
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        fltr->used = false;

        if (fltr->filter_op) {
                set_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id)
                        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
                                                 false);
        } else {
                clear_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id) {
                        fltr->rxq_id = fltr->next_rxq_id;
                        qede_configure_arfs_fltr(edev, fltr,
                                                 fltr->rxq_id, true);
                }
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

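/* Walk all hash buckets and reap filters: nodes that are no longer valid
 * and have no request in flight are torn down, while still-valid entries
 * whose flow has expired (per rps_may_expire_flow()) or whose device is
 * going down get a delete request issued. With @free_fltr the teardown is
 * unconditional. If filters remain, the sp_task poll is rescheduled.
 */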
/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
        int i;

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
                struct hlist_node *temp;
                struct hlist_head *head;
                struct qede_arfs_fltr_node *fltr;

                head = &edev->arfs->arfs_hl_head[i];

                hlist_for_each_entry_safe(fltr, temp, head, node) {
                        bool del = false;

                        if (edev->state != QEDE_STATE_OPEN)
                                del = true;

                        spin_lock_bh(&edev->arfs->arfs_list_lock);

                        if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
                             !fltr->used) || free_fltr) {
                                qede_dequeue_fltr_and_config_searcher(edev,
                                                                      fltr);
                        } else {
                                bool flow_exp = false;
#ifdef CONFIG_RFS_ACCEL
                                flow_exp = rps_may_expire_flow(edev->ndev,
                                                               fltr->rxq_id,
                                                               fltr->flow_id,
                                                               fltr->sw_id);
#endif
                                if ((flow_exp || del) && !free_fltr)
                                        qede_configure_arfs_fltr(edev, fltr,
                                                                 fltr->rxq_id,
                                                                 false);
                        }

                        spin_unlock_bh(&edev->arfs->arfs_list_lock);
                }
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        if (!edev->arfs->filter_count) {
                if (edev->arfs->enable) {
                        edev->arfs->enable = false;
                        edev->ops->configure_arfs_searcher(edev->cdev, false);
                }
#ifdef CONFIG_RFS_ACCEL
        } else {
                set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
                schedule_delayed_work(&edev->sp_task,
                                      QEDE_SP_TASK_POLL_DELAY);
#endif
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (count) {
                qede_process_arfs_filters(edev, false);

                if (!edev->arfs->filter_count)
                        break;

                msleep(100);
                count--;
        }

        if (!count) {
                DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

                /* Something is terribly wrong, free forcefully */
                qede_process_arfs_filters(edev, true);
        }
}

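/* Allocate the aRFS bookkeeping: hash buckets, the sw_id bitmap and, when
 * CONFIG_RFS_ACCEL is set, the IRQ CPU reverse map the stack uses to steer
 * flows. Undone by qede_free_arfs().
 */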
int qede_alloc_arfs(struct qede_dev *edev)
{
        int i;

        edev->arfs = vzalloc(sizeof(*edev->arfs));
        if (!edev->arfs)
                return -ENOMEM;

        spin_lock_init(&edev->arfs->arfs_list_lock);

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
                INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));

        edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
                                             sizeof(long));
        if (!edev->arfs->arfs_fltr_bmap) {
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

#ifdef CONFIG_RFS_ACCEL
        edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
        if (!edev->ndev->rx_cpu_rmap) {
                vfree(edev->arfs->arfs_fltr_bmap);
                edev->arfs->arfs_fltr_bmap = NULL;
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }
#endif
        return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
        if (!edev->arfs)
                return;

#ifdef CONFIG_RFS_ACCEL
        if (edev->ndev->rx_cpu_rmap)
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

        edev->ndev->rx_cpu_rmap = NULL;
#endif
        vfree(edev->arfs->arfs_fltr_bmap);
        edev->arfs->arfs_fltr_bmap = NULL;
        vfree(edev->arfs);
        edev->arfs = NULL;
}

#ifdef CONFIG_RFS_ACCEL
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
                                 const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
                    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
                        return true;
                else
                        return false;
        } else {
                struct in6_addr *src = &tpos->tuple.src_ipv6;
                u8 size = sizeof(struct in6_addr);

                if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
                    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
                        return true;
                else
                        return false;
        }
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
                          __be16 src_port, __be16 dst_port, u8 ip_proto)
{
        struct qede_arfs_fltr_node *tpos;

        hlist_for_each_entry(tpos, h, node)
                if (tpos->tuple.ip_proto == ip_proto &&
                    tpos->tuple.eth_proto == skb->protocol &&
                    qede_compare_ip_addr(tpos, skb) &&
                    tpos->tuple.src_port == src_port &&
                    tpos->tuple.dst_port == dst_port)
                        return tpos;

        return NULL;
}

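/* Reserve a free sw_id from the bitmap and allocate the node plus a
 * min_hlen-byte packet buffer. GFP_ATOMIC is required here: this runs
 * from qede_rx_flow_steer() under arfs_list_lock in softirq context.
 */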
static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
        struct qede_arfs_fltr_node *n;
        int bit_id;

        bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
                                     QEDE_RFS_MAX_FLTR);

        if (bit_id >= QEDE_RFS_MAX_FLTR)
                return NULL;

        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n)
                return NULL;

        n->data = kzalloc(min_hlen, GFP_ATOMIC);
        if (!n->data) {
                kfree(n);
                return NULL;
        }

        n->sw_id = (u16)bit_id;
        set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
        return n;
}

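/* .ndo_rx_flow_steer callback. Plain (non-encapsulated) TCP/UDP over
 * IPv4/IPv6 only. If a filter for the flow already exists, only its
 * target queue is updated; otherwise a copy of the packet headers is
 * stashed in a new node and programmed into the hardware. Returns the
 * filter's sw_id, which the stack later hands back via
 * rps_may_expire_flow().
 */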
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc, tp_offset;
        struct ethhdr *eth;
        __be16 *ports;
        u16 tbl_idx;
        u8 ip_proto;

        if (skb->encapsulation)
                return -EPROTONOSUPPORT;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip_proto = ip_hdr(skb)->protocol;
                tp_offset = sizeof(struct iphdr);
        } else {
                ip_proto = ipv6_hdr(skb)->nexthdr;
                tp_offset = sizeof(struct ipv6hdr);
        }

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
                return -EPROTONOSUPPORT;

        ports = (__be16 *)(skb->data + tp_offset);
        tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
                                      skb, ports[0], ports[1], ip_proto);
        if (n) {
                /* Filter match */
                n->next_rxq_id = rxq_index;

                if (test_bit(QEDE_FLTR_VALID, &n->state)) {
                        if (n->rxq_id != rxq_index)
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         false);
                } else {
                        if (!n->used) {
                                n->rxq_id = rxq_index;
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         true);
                        }
                }

                rc = n->sw_id;
                goto ret_unlock;
        }

        min_hlen = ETH_HLEN + skb_headlen(skb);

        n = qede_alloc_filter(edev, min_hlen);
        if (!n) {
                rc = -ENOMEM;
                goto ret_unlock;
        }

        n->buf_len = min_hlen;
        n->rxq_id = rxq_index;
        n->next_rxq_id = rxq_index;
        n->tuple.src_port = ports[0];
        n->tuple.dst_port = ports[1];
        n->flow_id = flow_id;

        if (skb->protocol == htons(ETH_P_IP)) {
                n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
                n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }

        eth = (struct ethhdr *)n->data;
        eth->h_proto = skb->protocol;
        n->tuple.eth_proto = skb->protocol;
        n->tuple.ip_proto = ip_proto;
        memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

        rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
        if (rc)
                goto ret_unlock;

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

        spin_unlock_bh(&edev->arfs->arfs_list_lock);

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);

        return n->sw_id;

ret_unlock:
        spin_unlock_bh(&edev->arfs->arfs_list_lock);
        return rc;
}
#endif

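/* Callback through which qed reports the tunnel ports currently in effect;
 * drop the cached VXLAN/geneve destination port if it no longer matches
 * what the device is actually configured with.
 */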
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
        struct qede_dev *edev = dev;

        if (edev->vxlan_dst_port != vxlan_port)
                edev->vxlan_dst_port = 0;

        if (edev->geneve_dst_port != geneve_port)
                edev->geneve_dst_port = 0;
}

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
        struct qede_dev *edev = dev;

        __qede_lock(edev);

        /* MAC hints take effect only if we haven't set one already */
        if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) {
                __qede_unlock(edev);
                return;
        }

        ether_addr_copy(edev->ndev->dev_addr, mac);
        __qede_unlock(edev);
}

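/* Build the RSS portion of a vport-update request. With a single Rx queue
 * RSS is simply disabled. The cached indirection table is re-seeded with
 * ethtool defaults if it was never set or references queues that no
 * longer exist, and the hash key and caps are generated once and reused.
 */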
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update)
{
        bool need_reset = false;
        int i;

        if (QEDE_RSS_COUNT(edev) <= 1) {
                memset(rss, 0, sizeof(*rss));
                *update = 0;
                return;
        }

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
                        need_reset = true;
                        break;
                }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        u16 indir_val, val;

                        val = QEDE_RSS_COUNT(edev);
                        indir_val = ethtool_rxfh_indir_default(i, val);
                        edev->rss_ind_table[i] = indir_val;
                }
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        /* Now that we have the queue-indirection, prepare the handles */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

                rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
                netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
        memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
                edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
                edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }
        rss->rss_caps = edev->rss_caps;

        *update = 1;
}

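/* Thin wrappers that build a QED_FILTER_TYPE_UCAST command for a single
 * MAC or VLAN and hand it to qed.
 */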
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
                                  enum qed_filter_xcast_params_type opcode,
                                  u16 vid)
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
        struct qed_update_vport_params *params;
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (edev->accept_any_vlan == action)
                return 0;

        params = vzalloc(sizeof(*params));
        if (!params)
                return -ENOMEM;

        params->vport_id = 0;
        params->accept_any_vlan = action;
        params->update_accept_any_vlan_flg = 1;

        rc = edev->ops->vport_update(edev->cdev, params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                edev->accept_any_vlan = action;
        }

        vfree(params);
        return 0;
}

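/* .ndo_vlan_rx_add_vid callback. If the interface is down the VID is only
 * cached and gets programmed on the next load. Otherwise a HW filter is
 * consumed if quota allows (vlan0 has a reserved filter); once out of
 * quota the device falls back to accept-any-VLAN mode.
 */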
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                DP_INFO(edev, "Failed to allocate struct for vlan\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&vlan->list);
        vlan->vid = vid;
        vlan->configured = false;

        /* Verify vlan isn't already configured */
        list_for_each_entry(tmp, &edev->vlan_list, list) {
                if (tmp->vid == vlan->vid) {
                        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                                   "vlan already configured\n");
                        kfree(vlan);
                        return -EEXIST;
                }
        }

        /* If interface is down, cache this VLAN ID and return */
        __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
                           vid);
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
                goto out;
        }

        /* Check for the filter limit.
         * Note - vlan0 has a reserved filter and can be added without
         * worrying about quota
         */
        if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
            (vlan->vid == 0)) {
                rc = qede_set_ucast_rx_vlan(edev,
                                            QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
                        goto out;
                }
                vlan->configured = true;

                /* The vlan0 filter doesn't consume our quota */
                if (vlan->vid != 0)
                        edev->configured_vlans++;
        } else {
                /* Out of quota; activate accept-any-VLAN mode */
                if (!edev->non_configured_vlans) {
                        rc = qede_config_accept_any_vlan(edev, true);
                        if (rc) {
                                kfree(vlan);
                                goto out;
                        }
                }

                edev->non_configured_vlans++;
        }

        list_add(&vlan->list, &edev->vlan_list);

out:
        __qede_unlock(edev);
        return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
                                    struct qede_vlan *vlan)
{
        /* The vlan0 filter doesn't consume our quota */
        if (vlan->vid != 0) {
                if (vlan->configured)
                        edev->configured_vlans--;
                else
                        edev->non_configured_vlans--;
        }

        list_del(&vlan->list);
        kfree(vlan);
}

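/* Re-program cached VLAN filters after a (re)load: configure every
 * non-configured VID while credits last, and reconcile accept-any-VLAN
 * mode with whether any VIDs remain unconfigured.
 */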
int qede_configure_vlan_filters(struct qede_dev *edev)
{
        int rc = 0, real_rc = 0, accept_any_vlan = 0;
        struct qed_dev_eth_info *dev_info;
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return 0;

        dev_info = &edev->dev_info;

        /* Configure non-configured vlans */
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->configured)
                        continue;

                /* We have used all our credits, now enable accept_any_vlan */
                if ((vlan->vid != 0) &&
                    (edev->configured_vlans == dev_info->num_vlan_filters)) {
                        accept_any_vlan = 1;
                        continue;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %u\n",
                               vlan->vid);
                        real_rc = rc;
                        continue;
                }

                vlan->configured = true;
                /* The vlan0 filter doesn't consume our VLAN filter quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans--;
                        edev->configured_vlans++;
                }
        }

        /* Enable accept_any_vlan mode if we have more VLANs than credits,
         * or disable it if we've actually removed a non-configured vlan
         * and all remaining vlans are truly configured.
         */
        if (accept_any_vlan)
                rc = qede_config_accept_any_vlan(edev, true);
        else if (!edev->non_configured_vlans)
                rc = qede_config_accept_any_vlan(edev, false);

        if (rc && !real_rc)
                real_rc = rc;

        return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan = NULL;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

        /* Find whether entry exists */
        __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list)
                if (vlan->vid == vid)
                        break;

        if (!vlan || (vlan->vid != vid)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
        }

        if (edev->state != QEDE_STATE_OPEN) {
                /* The interface is already down, so there is no vport from
                 * which to remove the vlan filter; just update the vlan list.
                 */
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
                goto out;
        }

        /* Remove vlan */
        if (vlan->configured) {
                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
                        goto out;
                }
        }

        qede_del_vlan_from_list(edev, vlan);

        /* We have removed a VLAN; try to configure a non-configured VLAN
         * from the list.
         */
        rc = qede_configure_vlan_filters(edev);

out:
        __qede_unlock(edev);
        return rc;
}

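/* Run on unload: every VLAN is marked non-configured again, so that a
 * subsequent load re-programs the filters from the cached list.
 */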
void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return;

        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (!vlan->configured)
                        continue;

                vlan->configured = false;

                /* The vlan0 filter doesn't consume our quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans++;
                        edev->configured_vlans--;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "marked vlan %d as non-configured\n", vlan->vid);
        }

        edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
                                     struct qede_reload_args *args)
{
        edev->ndev->features = args->u.features;
}

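/* .ndo_set_features callback. Only a NETIF_F_GRO toggle can require a
 * reload here, since HW GRO aggregations must be torn down or set up;
 * e.g. a hypothetical "ethtool -K <dev> gro off" from userspace lands
 * here. With an XDP program attached there are no FW aggregations anyway,
 * so the feature flags are simply updated in place.
 */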
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;

        /* No action needed if hardware GRO is disabled during driver load */
        if (changes & NETIF_F_GRO) {
                if (dev->features & NETIF_F_GRO)
                        need_reload = !edev->gro_disable;
                else
                        need_reload = edev->gro_disable;
        }

        if (need_reload) {
                struct qede_reload_args args;

                args.u.features = features;
                args.func = &qede_set_features_reload;

                /* Make sure that we definitely need to reload.
                 * In case of an eBPF attached program, there will be no FW
                 * aggregations, so no need to actually reload.
                 */
                __qede_lock(edev);
                if (edev->xdp_prog)
                        args.func(edev, &args);
                else
                        qede_reload(edev, &args, true);
                __qede_unlock(edev);

                return 1;
        }

        return 0;
}

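/* .ndo_udp_tunnel_add callback: program the first VXLAN/geneve UDP port
 * the stack reports into the device. Additional ports are ignored, since
 * only a single port per tunnel type is cached.
 */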
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);
        int rc;

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (!edev->dev_info.common.vxlan_enable)
                        return;

                if (edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->vxlan_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                                   t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
                                  t_port);
                }

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (!edev->dev_info.common.geneve_enable)
                        return;

                if (edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = t_port;

                __qede_lock(edev);
                rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                if (!rc) {
                        edev->geneve_dst_port = t_port;
                        DP_VERBOSE(edev, QED_MSG_DEBUG,
                                   "Added geneve port=%d\n", t_port);
                } else {
                        DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
                                  t_port);
                }

                break;
        default:
                return;
        }
}

void qede_udp_tunnel_del(struct net_device *dev,
                         struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
        u16 t_port = ntohs(ti->port);

        memset(&tunn_params, 0, sizeof(tunn_params));

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (t_port != edev->vxlan_dst_port)
                        return;

                tunn_params.update_vxlan_port = 1;
                tunn_params.vxlan_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->vxlan_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);

                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (t_port != edev->geneve_dst_port)
                        return;

                tunn_params.update_geneve_port = 1;
                tunn_params.geneve_port = 0;

                __qede_lock(edev);
                edev->ops->tunn_config(edev->cdev, &tunn_params);
                __qede_unlock(edev);

                edev->geneve_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                break;
        default:
                return;
        }
}

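/* XDP setup: swap the new program in under a vport reload and release the
 * reference on the old one.
 */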
static void qede_xdp_reload_func(struct qede_dev *edev,
                                 struct qede_reload_args *args)
{
        struct bpf_prog *old;

        old = xchg(&edev->xdp_prog, args->u.new_prog);
        if (old)
                bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
        struct qede_reload_args args;

        /* The caller has already taken a reference on the bpf program */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
        qede_reload(edev, &args, false);

        return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
        struct qede_dev *edev = netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_attached = !!edev->xdp_prog;
                xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
                return 0;
        default:
                return -EINVAL;
        }
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
        struct qed_filter_params filter_cmd;
        int i;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_MCAST;
        filter_cmd.filter.mcast.type = opcode;
        filter_cmd.filter.mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
                ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

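/* .ndo_set_mac_address callback. Runs under qede_lock so the state can't
 * transition mid-update; verifies qed permits the address (e.g. a VF with
 * a forced MAC can't change it), swaps the unicast filter if the device
 * is up, and defers programming to the next load otherwise.
 */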
int qede_set_mac_addr(struct net_device *ndev, void *p)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int rc = 0;

        /* Make sure the state doesn't transition while changing the MAC.
         * Also, all flows accessing the dev_addr field are doing that under
         * this lock.
         */
        __qede_lock(edev);

        if (!is_valid_ether_addr(addr->sa_data)) {
                DP_NOTICE(edev, "The MAC address is not valid\n");
                rc = -EFAULT;
                goto out;
        }

        if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
                DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
                          addr->sa_data);
                rc = -EINVAL;
                goto out;
        }

        if (edev->state == QEDE_STATE_OPEN) {
                /* Remove the previous primary mac */
                rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                           ndev->dev_addr);
                if (rc)
                        goto out;
        }

        ether_addr_copy(ndev->dev_addr, addr->sa_data);
        DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "The device is currently down\n");
                goto out;
        }

        edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);

        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                   ndev->dev_addr);
out:
        __qede_unlock(edev);
        return rc;
}

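/* Rebuild the multicast filter set: flush the old filters, then either
 * program up to 64 exact MACs or, for all-multicast/overflow, request
 * multicast-promiscuous mode via @accept_flags.
 */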
static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
        struct qede_dev *edev = netdev_priv(ndev);
        unsigned char *mc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc = 0, mc_count;
        size_t size;

        size = 64 * ETH_ALEN;

        mc_macs = kzalloc(size, GFP_KERNEL);
        if (!mc_macs) {
                DP_NOTICE(edev,
                          "Failed to allocate memory for multicast MACs\n");
                rc = -ENOMEM;
                goto exit;
        }

        temp = mc_macs;

        /* Remove all previously configured MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   mc_macs, 1);
        if (rc)
                goto exit;

        netif_addr_lock_bh(ndev);

        mc_count = netdev_mc_count(ndev);
        if (mc_count <= 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
                }
        }

        netif_addr_unlock_bh(ndev);

        /* Fall back to multicast-promiscuous mode for all-multicast or when
         * there are more addresses than the 64 exact filters can hold
         * (TBD: resource allocation).
         */
        if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
                if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
                        *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
        } else {
                /* Add all multicast MAC filters */
                rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                           mc_macs, mc_count);
        }

exit:
        kfree(mc_macs);
        return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
}

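/* Rebuild the whole Rx mode configuration: reset unicast filters to the
 * primary MAC, re-add secondary unicast MACs (or go promiscuous when out
 * of filters), refresh multicast filtering and reconcile accept-any-VLAN
 * with the promiscuity state, then commit the accept flags to qed.
 */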
/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
        size_t size;

        netif_addr_lock_bh(ndev);

        uc_count = netdev_uc_count(ndev);
        size = uc_count * ETH_ALEN;

        uc_macs = kzalloc(size, GFP_ATOMIC);
        if (!uc_macs) {
                DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
                netif_addr_unlock_bh(ndev);
                return;
        }

        temp = uc_macs;
        netdev_for_each_uc_addr(ha, ndev) {
                ether_addr_copy(temp, ha->addr);
                temp += ETH_ALEN;
        }

        netif_addr_unlock_bh(ndev);

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary mac)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->ndev->dev_addr);
        if (rc)
                goto out;

        /* Check for promiscuous */
        if (ndev->flags & IFF_PROMISC)
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        else
                accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

        /* Configure all filters regardless, in case promisc is rejected */
        if (uc_count < edev->dev_info.num_mac_filters) {
                int i;

                temp = uc_macs;
                for (i = 0; i < uc_count; i++) {
                        rc = qede_set_ucast_rx_mac(edev,
                                                   QED_FILTER_XCAST_TYPE_ADD,
                                                   temp);
                        if (rc)
                                goto out;

                        temp += ETH_ALEN;
                }
        } else {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
                goto out;

        /* take care of VLAN mode */
        if (ndev->flags & IFF_PROMISC) {
                qede_config_accept_any_vlan(edev, true);
        } else if (!edev->non_configured_vlans) {
                /* It's possible that accept_any_vlan mode is set due to a
                 * previous setting of IFF_PROMISC. If vlan credits are
                 * sufficient, disable accept_any_vlan.
                 */
                qede_config_accept_any_vlan(edev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        edev->ops->filter_config(edev->cdev, &rx_mode);
out:
        kfree(uc_macs);
}

static struct qede_arfs_fltr_node *
qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location)
{
        struct qede_arfs_fltr_node *fltr;

        hlist_for_each_entry(fltr, head, node)
                if (location == fltr->sw_id)
                        return fltr;

        return NULL;
}

static bool
qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos,
                           struct ethtool_rx_flow_spec *fsp,
                           __be16 proto)
{
        if (proto == htons(ETH_P_IP)) {
                struct ethtool_tcpip4_spec *ip;

                ip = &fsp->h_u.tcp_ip4_spec;

                if (tpos->tuple.src_ipv4 == ip->ip4src &&
                    tpos->tuple.dst_ipv4 == ip->ip4dst)
                        return true;
                else
                        return false;
        } else {
                struct ethtool_tcpip6_spec *ip6;
                struct in6_addr *src;

                ip6 = &fsp->h_u.tcp_ip6_spec;
                src = &tpos->tuple.src_ipv6;

                if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) &&
                    !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst,
                            sizeof(struct in6_addr)))
                        return true;
                else
                        return false;
        }
        return false;
}

int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
                          u32 *rule_locs)
{
        struct qede_arfs_fltr_node *fltr;
        struct hlist_head *head;
        int cnt = 0, rc = 0;

        info->data = QEDE_RFS_MAX_FLTR;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

        hlist_for_each_entry(fltr, head, node) {
                if (cnt == info->rule_cnt) {
                        rc = -EMSGSIZE;
                        goto unlock;
                }

                rule_locs[cnt] = fltr->sw_id;
                cnt++;
        }

        info->rule_cnt = cnt;

unlock:
        __qede_unlock(edev);
        return rc;
}

int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fsp = &cmd->fs;
        struct qede_arfs_fltr_node *fltr = NULL;
        int rc = 0;

        cmd->data = QEDE_RFS_MAX_FLTR;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
                                         fsp->location);
        if (!fltr) {
                DP_NOTICE(edev, "Rule not found - location=0x%x\n",
                          fsp->location);
                rc = -EINVAL;
                goto unlock;
        }

        if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
                if (fltr->tuple.ip_proto == IPPROTO_TCP)
                        fsp->flow_type = TCP_V4_FLOW;
                else
                        fsp->flow_type = UDP_V4_FLOW;

                fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
                fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
                fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
                fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
        } else {
                if (fltr->tuple.ip_proto == IPPROTO_TCP)
                        fsp->flow_type = TCP_V6_FLOW;
                else
                        fsp->flow_type = UDP_V6_FLOW;
                fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
                fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
                memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
                       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
                memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
                       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
        }

        fsp->ring_cookie = fltr->rxq_id;

unlock:
        __qede_unlock(edev);
        return rc;
}

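/* Sanity-check an ethtool flow spec (supported flow type, in-range
 * location and ring) and compute the length of the synthetic packet that
 * qede_add_cls_rule() will build; returns -EEXIST if an identical tuple
 * or the same location is already programmed.
 */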
static int
qede_validate_and_check_flow_exist(struct qede_dev *edev,
                                   struct ethtool_rx_flow_spec *fsp,
                                   int *min_hlen)
{
        __be16 src_port = 0x0, dst_port = 0x0;
        struct qede_arfs_fltr_node *fltr;
        struct hlist_node *temp;
        struct hlist_head *head;
        __be16 eth_proto;
        u8 ip_proto;

        if (fsp->location >= QEDE_RFS_MAX_FLTR ||
            fsp->ring_cookie >= QEDE_RSS_COUNT(edev))
                return -EINVAL;

        if (fsp->flow_type == TCP_V4_FLOW) {
                *min_hlen += sizeof(struct iphdr) +
                                sizeof(struct tcphdr);
                eth_proto = htons(ETH_P_IP);
                ip_proto = IPPROTO_TCP;
        } else if (fsp->flow_type == UDP_V4_FLOW) {
                *min_hlen += sizeof(struct iphdr) +
                                sizeof(struct udphdr);
                eth_proto = htons(ETH_P_IP);
                ip_proto = IPPROTO_UDP;
        } else if (fsp->flow_type == TCP_V6_FLOW) {
                *min_hlen += sizeof(struct ipv6hdr) +
                                sizeof(struct tcphdr);
                eth_proto = htons(ETH_P_IPV6);
                ip_proto = IPPROTO_TCP;
        } else if (fsp->flow_type == UDP_V6_FLOW) {
                *min_hlen += sizeof(struct ipv6hdr) +
                                sizeof(struct udphdr);
                eth_proto = htons(ETH_P_IPV6);
                ip_proto = IPPROTO_UDP;
        } else {
                DP_NOTICE(edev, "Unsupported flow type = 0x%x\n",
                          fsp->flow_type);
                return -EPROTONOSUPPORT;
        }

        if (eth_proto == htons(ETH_P_IP)) {
                src_port = fsp->h_u.tcp_ip4_spec.psrc;
                dst_port = fsp->h_u.tcp_ip4_spec.pdst;
        } else {
                src_port = fsp->h_u.tcp_ip6_spec.psrc;
                dst_port = fsp->h_u.tcp_ip6_spec.pdst;
        }

        head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
        hlist_for_each_entry_safe(fltr, temp, head, node) {
                if ((fltr->tuple.ip_proto == ip_proto &&
                     fltr->tuple.eth_proto == eth_proto &&
                     qede_compare_user_flow_ips(fltr, fsp, eth_proto) &&
                     fltr->tuple.src_port == src_port &&
                     fltr->tuple.dst_port == dst_port) ||
                    fltr->sw_id == fsp->location)
                        return -EEXIST;
        }

        return 0;
}

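/* Unlike the aRFS path, ethtool rules are added and removed synchronously:
 * busy-wait (up to QEDE_ARFS_POLL_COUNT * 20ms) for the completion
 * callback to clear fltr->used, and tear the filter down on timeout or a
 * firmware error.
 */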
static int
qede_poll_arfs_filter_config(struct qede_dev *edev,
                             struct qede_arfs_fltr_node *fltr)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (fltr->used && count) {
                msleep(20);
                count--;
        }

        if (count == 0 || fltr->fw_rc) {
                qede_dequeue_fltr_and_config_searcher(edev, fltr);
                return -EIO;
        }

        return fltr->fw_rc;
}

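/* Add an ethtool n-tuple rule ("ethtool -N"; e.g. a hypothetical
 * "ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 dst-ip 10.0.0.2
 *  src-port 1000 dst-port 2000 action 1 loc 0"). The hardware matches on
 * a sample packet rather than a mask, so a minimal Ethernet/IP/L4 header
 * is synthesized from the flow spec before the filter is programmed and
 * polled for completion.
 */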
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
        struct ethtool_rx_flow_spec *fsp = &info->fs;
        struct qede_arfs_fltr_node *n;
        int min_hlen = ETH_HLEN, rc;
        struct ethhdr *eth;
        struct iphdr *ip;
        __be16 *ports;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen);
        if (rc)
                goto unlock;

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n) {
                rc = -ENOMEM;
                goto unlock;
        }

        n->data = kzalloc(min_hlen, GFP_KERNEL);
        if (!n->data) {
                kfree(n);
                rc = -ENOMEM;
                goto unlock;
        }

        n->sw_id = fsp->location;
        set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
        n->buf_len = min_hlen;
        n->rxq_id = fsp->ring_cookie;
        n->next_rxq_id = n->rxq_id;
        eth = (struct ethhdr *)n->data;

        if (info->fs.flow_type == TCP_V4_FLOW ||
            info->fs.flow_type == UDP_V4_FLOW) {
                ports = (__be16 *)(n->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                eth->h_proto = htons(ETH_P_IP);
                n->tuple.eth_proto = htons(ETH_P_IP);
                n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src;
                n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst;
                n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc;
                n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst;
                ports[0] = n->tuple.src_port;
                ports[1] = n->tuple.dst_port;
                ip = (struct iphdr *)(n->data + ETH_HLEN);
                ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src;
                ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst;
                ip->version = 0x4;
                ip->ihl = 0x5;

                if (info->fs.flow_type == TCP_V4_FLOW) {
                        n->tuple.ip_proto = IPPROTO_TCP;
                        ip->protocol = IPPROTO_TCP;
                } else {
                        n->tuple.ip_proto = IPPROTO_UDP;
                        ip->protocol = IPPROTO_UDP;
                }
                ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN);
        } else {
                struct ipv6hdr *ip6;

                ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN);
                ports = (__be16 *)(n->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                eth->h_proto = htons(ETH_P_IPV6);
                n->tuple.eth_proto = htons(ETH_P_IPV6);
                memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst,
                       sizeof(struct in6_addr));
                n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc;
                n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst;
                ports[0] = n->tuple.src_port;
                ports[1] = n->tuple.dst_port;
                memcpy(&ip6->saddr, &n->tuple.src_ipv6,
                       sizeof(struct in6_addr));
                memcpy(&ip6->daddr, &n->tuple.dst_ipv6,
                       sizeof(struct in6_addr));
                ip6->version = 0x6;

                if (info->fs.flow_type == TCP_V6_FLOW) {
                        n->tuple.ip_proto = IPPROTO_TCP;
                        ip6->nexthdr = NEXTHDR_TCP;
                        ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
                } else {
                        n->tuple.ip_proto = IPPROTO_UDP;
                        ip6->nexthdr = NEXTHDR_UDP;
                        ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
                }
        }

        rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
        if (rc)
                goto unlock;

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
        rc = qede_poll_arfs_filter_config(edev, n);
unlock:
        __qede_unlock(edev);
        return rc;
}

int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
        struct ethtool_rx_flow_spec *fsp = &info->fs;
        struct qede_arfs_fltr_node *fltr = NULL;
        int rc = -EPERM;

        __qede_lock(edev);
        if (!edev->arfs)
                goto unlock;

        fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
                                         fsp->location);
        if (!fltr)
                goto unlock;

        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

        rc = qede_poll_arfs_filter_config(edev, fltr);
        if (rc == 0)
                qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
        __qede_unlock(edev);
        return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
        int count = 0;

        __qede_lock(edev);

        if (!edev->arfs)
                goto unlock;

        count = edev->arfs->filter_count;

unlock:
        __qede_unlock(edev);
        return count;
}