GNU Linux-libre 4.4.284-gnu1
[releases.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <net/switchdev.h>
52 #include <generated/utsrelease.h>
53
54 #include "spectrum.h"
55 #include "core.h"
56 #include "reg.h"
57 #include "port.h"
58 #include "trap.h"
59 #include "txheader.h"
60
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* Tx header field definitions. Each MLXSW_ITEM32() invocation declares a
 * field of the 16-byte Tx header (byte offset, LSB position, width in bits)
 * and generates the mlxsw_tx_hdr_<field>_set() helpers used by
 * mlxsw_sp_txhdr_construct() below.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
132 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
133                                      const struct mlxsw_tx_info *tx_info)
134 {
135         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
136
137         memset(txhdr, 0, MLXSW_TXHDR_LEN);
138
139         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
140         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
141         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
142         mlxsw_tx_hdr_swid_set(txhdr, 0);
143         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
144         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
145         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
146 }
147
148 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
149 {
150         char spad_pl[MLXSW_REG_SPAD_LEN];
151         int err;
152
153         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
154         if (err)
155                 return err;
156         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
157         return 0;
158 }
159
160 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
161                                           bool is_up)
162 {
163         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
164         char paos_pl[MLXSW_REG_PAOS_LEN];
165
166         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
167                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
168                             MLXSW_PORT_ADMIN_STATUS_DOWN);
169         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
170 }
171
172 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
173                                          bool *p_is_up)
174 {
175         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
176         char paos_pl[MLXSW_REG_PAOS_LEN];
177         u8 oper_status;
178         int err;
179
180         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
181         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
182         if (err)
183                 return err;
184         oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
185         *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
186         return 0;
187 }
188
189 static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
190 {
191         char sfmr_pl[MLXSW_REG_SFMR_LEN];
192         int err;
193
194         mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
195                             MLXSW_SP_VFID_BASE + vfid, 0);
196         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
197
198         if (err)
199                 return err;
200
201         set_bit(vfid, mlxsw_sp->active_vfids);
202         return 0;
203 }
204
/* Destroy a vFID: clear it from the switch-wide active bitmap, then remove
 * it from the device (SFMR register). The device write's return value is
 * ignored; destruction is best-effort.
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(vfid, mlxsw_sp->active_vfids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    MLXSW_SP_VFID_BASE + vfid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
215
/* Program the port's hardware MAC address (PPAD register).
 * @addr must point at ETH_ALEN bytes.
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
226
227 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
228 {
229         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
230         unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
231
232         ether_addr_copy(addr, mlxsw_sp->base_mac);
233         addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
234         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
235 }
236
237 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
238                                        u16 vid, enum mlxsw_reg_spms_state state)
239 {
240         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
241         char *spms_pl;
242         int err;
243
244         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
245         if (!spms_pl)
246                 return -ENOMEM;
247         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
248         mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
249         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
250         kfree(spms_pl);
251         return err;
252 }
253
254 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
255 {
256         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
257         char pmtu_pl[MLXSW_REG_PMTU_LEN];
258         int max_mtu;
259         int err;
260
261         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
262         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
263         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
264         if (err)
265                 return err;
266         max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
267
268         if (mtu > max_mtu)
269                 return -EINVAL;
270
271         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
272         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
273 }
274
/* Assign the port to switch partition @swid (PSPA register). */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
283
/* Enable or disable Virtual Port (VP) mode on the port (SVPE register).
 * In VP mode, FIDs are resolved via explicit {Port, VID} mappings (see
 * mlxsw_sp_port_vid_to_fid_set()) rather than global VID to FID mapping.
 */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
293
/* Install (@valid == true) or remove (@valid == false) a VID to FID
 * mapping of type @mt for the port (SVFA register). Exported for use by
 * the switchdev code in this driver.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
305
306 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
307                                           u16 vid, bool learn_enable)
308 {
309         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
310         char *spvmlr_pl;
311         int err;
312
313         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
314         if (!spvmlr_pl)
315                 return -ENOMEM;
316         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
317                               learn_enable);
318         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
319         kfree(spvmlr_pl);
320         return err;
321 }
322
/* Set up the system port to local port mapping for the port
 * (SSPR register).
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
332
333 static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
334                                       bool *p_usable)
335 {
336         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
337         char pmlp_pl[MLXSW_REG_PMLP_LEN];
338         int err;
339
340         mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
341         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
342         if (err)
343                 return err;
344         *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
345         return 0;
346 }
347
348 static int mlxsw_sp_port_open(struct net_device *dev)
349 {
350         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
351         int err;
352
353         err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
354         if (err)
355                 return err;
356         netif_start_queue(dev);
357         return 0;
358 }
359
360 static int mlxsw_sp_port_stop(struct net_device *dev)
361 {
362         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
363
364         netif_stop_queue(dev);
365         return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
366 }
367
/* .ndo_start_xmit: prepend the mlxsw Tx header and hand the skb to the
 * core for transmission. On success the core owns the skb; on any drop
 * path the skb is freed here and tx_dropped is bumped. Returns
 * NETDEV_TX_BUSY only when the transmit queue is momentarily full,
 * NETDEV_TX_OK otherwise (including drops).
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header; reallocate the skb if there is not
	 * enough headroom, freeing the original on either outcome.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* Pad short frames to the minimum Ethernet length. On failure the
	 * skb is assumed to have been freed by eth_skb_pad() — note there
	 * is intentionally no local kfree here.
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
424
425 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
426 {
427         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
428         struct sockaddr *addr = p;
429         int err;
430
431         if (!is_valid_ether_addr(addr->sa_data))
432                 return -EADDRNOTAVAIL;
433
434         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
435         if (err)
436                 return err;
437         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
438         return 0;
439 }
440
441 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
442 {
443         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
444         int err;
445
446         err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
447         if (err)
448                 return err;
449         dev->mtu = mtu;
450         return 0;
451 }
452
/* .ndo_get_stats64: sum the per-CPU Rx/Tx counters into @stats.
 * The 64-bit counters are read under the u64_stats seqcount retry loop so
 * a consistent snapshot is obtained even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
484
485 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
486                            u16 vid_end, bool is_member, bool untagged)
487 {
488         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
489         char *spvm_pl;
490         int err;
491
492         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
493         if (!spvm_pl)
494                 return -ENOMEM;
495
496         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
497                             vid_end, is_member, untagged);
498         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
499         kfree(spvm_pl);
500         return err;
501 }
502
/* Transition the port into Virtual mode: install an explicit {Port, VID}
 * to FID mapping for every active 802.1Q VLAN, then enable Virtual Port
 * mode. On failure, the mappings installed so far (bounded by
 * last_visited_vid) are rolled back; rollback errors are ignored.
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		/* All mappings were installed; unwind every active VLAN. */
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
532
533 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
534 {
535         enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
536         u16 vid;
537         int err;
538
539         err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
540         if (err)
541                 return err;
542
543         for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
544                 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
545                                                    vid, vid);
546                 if (err)
547                         return err;
548         }
549
550         return 0;
551 }
552
553 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
554                           u16 vid)
555 {
556         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
557         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
558         char *sftr_pl;
559         int err;
560
561         /* VLAN 0 is added to HW filter when device goes up, but it is
562          * reserved in our case, so simply return.
563          */
564         if (!vid)
565                 return 0;
566
567         if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
568                 netdev_warn(dev, "VID=%d already configured\n", vid);
569                 return 0;
570         }
571
572         if (!test_bit(vid, mlxsw_sp->active_vfids)) {
573                 err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
574                 if (err) {
575                         netdev_err(dev, "Failed to create vFID=%d\n",
576                                    MLXSW_SP_VFID_BASE + vid);
577                         return err;
578                 }
579
580                 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
581                 if (!sftr_pl) {
582                         err = -ENOMEM;
583                         goto err_flood_table_alloc;
584                 }
585                 mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
586                                     MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
587                                     MLXSW_PORT_CPU_PORT, true);
588                 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
589                 kfree(sftr_pl);
590                 if (err) {
591                         netdev_err(dev, "Failed to configure flood table\n");
592                         goto err_flood_table_config;
593                 }
594         }
595
596         /* In case we fail in the following steps, we intentionally do not
597          * destroy the associated vFID.
598          */
599
600         /* When adding the first VLAN interface on a bridged port we need to
601          * transition all the active 802.1Q bridge VLANs to use explicit
602          * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
603          */
604         if (!mlxsw_sp_port->nr_vfids) {
605                 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
606                 if (err) {
607                         netdev_err(dev, "Failed to set to Virtual mode\n");
608                         return err;
609                 }
610         }
611
612         err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
613                                            MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
614                                            true, MLXSW_SP_VFID_BASE + vid, vid);
615         if (err) {
616                 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
617                            vid, MLXSW_SP_VFID_BASE + vid);
618                 goto err_port_vid_to_fid_set;
619         }
620
621         err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
622         if (err) {
623                 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
624                 goto err_port_vid_learning_set;
625         }
626
627         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
628         if (err) {
629                 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
630                            vid);
631                 goto err_port_add_vid;
632         }
633
634         err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
635                                           MLXSW_REG_SPMS_STATE_FORWARDING);
636         if (err) {
637                 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
638                 goto err_port_stp_state_set;
639         }
640
641         mlxsw_sp_port->nr_vfids++;
642         set_bit(vid, mlxsw_sp_port->active_vfids);
643
644         return 0;
645
646 err_flood_table_config:
647 err_flood_table_alloc:
648         mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
649         return err;
650
651 err_port_stp_state_set:
652         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
653 err_port_add_vid:
654         mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
655 err_port_vid_learning_set:
656         mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
657                                      MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
658                                      MLXSW_SP_VFID_BASE + vid, vid);
659 err_port_vid_to_fid_set:
660         mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
661         return err;
662 }
663
/* .ndo_vlan_rx_kill_vid: tear down a VLAN upper device on the port.
 *
 * Reverses mlxsw_sp_port_add_vid() step by step: VID goes to discarding
 * STP state, VLAN membership is removed, learning re-enabled and the
 * {Port, VID} to vFID mapping invalidated. When this was the port's last
 * VLAN upper, the port transitions back to VLAN mode. VID 0 is reserved
 * and silently accepted. Note the vFID itself is not destroyed here.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false, MLXSW_SP_VFID_BASE + vid,
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (mlxsw_sp_port->nr_vfids == 1) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port->nr_vfids--;
	clear_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;
}
728
/* netdev ops for Spectrum port netdevs. FDB and bridge link operations
 * are delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};
745
/* ethtool .get_drvinfo: report driver name/version, firmware revision and
 * bus identification for the port's underlying device.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
763
/* One ethtool hardware statistic: its display string and the accessor
 * that extracts the counter from a PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* ethtool string name */
	u64 (*getter)(char *payload);	/* extracts value from PPCNT payload */
};

/* Table of hardware counters exposed via ethtool -S; names follow the
 * PPCNT counter-group field names.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
849
850 static void mlxsw_sp_port_get_strings(struct net_device *dev,
851                                       u32 stringset, u8 *data)
852 {
853         u8 *p = data;
854         int i;
855
856         switch (stringset) {
857         case ETH_SS_STATS:
858                 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
859                         memcpy(p, mlxsw_sp_port_hw_stats[i].str,
860                                ETH_GSTRING_LEN);
861                         p += ETH_GSTRING_LEN;
862                 }
863                 break;
864         }
865 }
866
867 static void mlxsw_sp_port_get_stats(struct net_device *dev,
868                                     struct ethtool_stats *stats, u64 *data)
869 {
870         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
871         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
872         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
873         int i;
874         int err;
875
876         mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
877         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
878         for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
879                 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
880 }
881
882 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
883 {
884         switch (sset) {
885         case ETH_SS_STATS:
886                 return MLXSW_SP_PORT_HW_STATS_LEN;
887         default:
888                 return -EOPNOTSUPP;
889         }
890 }
891
/* Mapping between one (or several equivalent) PTYS protocol bits and
 * the corresponding ethtool representation:
 * @mask: PTYS eth_proto bit(s) for this link mode
 * @supported: SUPPORTED_* ethtool flag (0 when ethtool has no flag)
 * @advertised: ADVERTISED_* ethtool flag (0 when ethtool has no flag)
 * @speed: link speed in Mb/s
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};
898
/* PTYS-bit <-> ethtool link-mode translation table. Entries with no
 * .supported/.advertised flag are speeds the legacy 32-bit ethtool API
 * cannot express (e.g. 25G/50G/100G); they still contribute a speed for
 * the speed/duplex report. Scanned linearly by the from_ptys/to_ptys
 * helpers below.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

/* Number of entries in mlxsw_sp_port_link_mode[]. */
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
997
998 static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
999 {
1000         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1001                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1002                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1003                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1004                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1005                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1006                 return SUPPORTED_FIBRE;
1007
1008         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1009                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1010                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1011                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1012                               MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1013                 return SUPPORTED_Backplane;
1014         return 0;
1015 }
1016
1017 static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1018 {
1019         u32 modes = 0;
1020         int i;
1021
1022         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1023                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1024                         modes |= mlxsw_sp_port_link_mode[i].supported;
1025         }
1026         return modes;
1027 }
1028
1029 static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1030 {
1031         u32 modes = 0;
1032         int i;
1033
1034         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1035                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1036                         modes |= mlxsw_sp_port_link_mode[i].advertised;
1037         }
1038         return modes;
1039 }
1040
1041 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1042                                             struct ethtool_cmd *cmd)
1043 {
1044         u32 speed = SPEED_UNKNOWN;
1045         u8 duplex = DUPLEX_UNKNOWN;
1046         int i;
1047
1048         if (!carrier_ok)
1049                 goto out;
1050
1051         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1052                 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1053                         speed = mlxsw_sp_port_link_mode[i].speed;
1054                         duplex = DUPLEX_FULL;
1055                         break;
1056                 }
1057         }
1058 out:
1059         ethtool_cmd_speed_set(cmd, speed);
1060         cmd->duplex = duplex;
1061 }
1062
1063 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1064 {
1065         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1066                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1067                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1068                               MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1069                 return PORT_FIBRE;
1070
1071         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1072                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1073                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1074                 return PORT_DA;
1075
1076         if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1077                               MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1078                               MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1079                               MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1080                 return PORT_NONE;
1081
1082         return PORT_OTHER;
1083 }
1084
/* ethtool .get_settings: query the PTYS register once and derive the
 * supported/advertised link modes, current speed/duplex and connector
 * type from its capability, admin and operational fields.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	/* Pack with proto_admin == 0 so the query does not modify anything. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational modes (link down), fall back to the
	 * capability bits for connector/partner reporting. */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
1120
1121 static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1122 {
1123         u32 ptys_proto = 0;
1124         int i;
1125
1126         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1127                 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1128                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1129         }
1130         return ptys_proto;
1131 }
1132
1133 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1134 {
1135         u32 ptys_proto = 0;
1136         int i;
1137
1138         for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1139                 if (speed == mlxsw_sp_port_link_mode[i].speed)
1140                         ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1141         }
1142         return ptys_proto;
1143 }
1144
/* ethtool .set_settings: write a new admin protocol mask to PTYS.
 * With autoneg the advertised modes are used; otherwise all modes
 * matching the requested forced speed. If the port is operationally
 * up, it is toggled down/up so the new admin mask takes effect.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	/* Query current capability/admin state before writing anything. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Restrict the request to what the port can actually do. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	/* Nothing to do if the admin mask is already as requested. */
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	/* Only a port that is currently up needs re-toggling for the new
	 * protocol mask to be renegotiated. */
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
1209
/* ethtool operations installed on every Spectrum port netdev. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1219
/* Allocate, configure and register the netdev for one front-panel
 * port. On failure, the goto labels unwind exactly the steps already
 * completed. A port whose module is reported not usable is skipped:
 * that path jumps to the unwind with err still 0, so 0 is returned
 * and mlxsw_sp->ports[local_port] stays NULL.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	bool usable;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	/* Default bridge-offload flags: learn addresses, sync learned
	 * entries to the kernel, flood unknown unicast; PVID is VLAN 1. */
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->pvid = 1;

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	/* Carrier state is driven by PUDE link events once registered. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sp_port->local_port);
		/* Not an error: free resources and return err == 0. */
		goto port_not_usable;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

/* Hardware configuration steps (SWID, MTU, admin status, ...) need no
 * explicit undo here; the labels simply fall through to freeing the
 * software resources. */
err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
1342
/* Destroy every vFID still marked active in the mlxsw_sp bitmap. */
static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 vfid;

	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
}
1350
/* Tear down one port created by mlxsw_sp_port_create(). Safe to call
 * for a slot that was never populated (e.g. unusable module).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Drop the PVID VLAN added during port init before unregistering. */
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	free_netdev(mlxsw_sp_port->dev);
}
1363
1364 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1365 {
1366         int i;
1367
1368         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1369                 mlxsw_sp_port_remove(mlxsw_sp, i);
1370         kfree(mlxsw_sp->ports);
1371 }
1372
1373 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1374 {
1375         size_t alloc_size;
1376         int i;
1377         int err;
1378
1379         alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1380         mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1381         if (!mlxsw_sp->ports)
1382                 return -ENOMEM;
1383
1384         for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1385                 err = mlxsw_sp_port_create(mlxsw_sp, i);
1386                 if (err)
1387                         goto err_port_create;
1388         }
1389         return 0;
1390
1391 err_port_create:
1392         for (i--; i >= 1; i--)
1393                 mlxsw_sp_port_remove(mlxsw_sp, i);
1394         kfree(mlxsw_sp->ports);
1395         return err;
1396 }
1397
/* Handler for PUDE (port up/down event) traps: reflect the reported
 * operational status into the netdev's carrier state.
 */
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* Events may arrive for ports that were skipped at init time. */
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}
1423
/* Listener binding the PUDE trap to its handler above. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1428
1429 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1430                                    enum mlxsw_event_trap_id trap_id)
1431 {
1432         struct mlxsw_event_listener *el;
1433         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1434         int err;
1435
1436         switch (trap_id) {
1437         case MLXSW_TRAP_ID_PUDE:
1438                 el = &mlxsw_sp_pude_event;
1439                 break;
1440         }
1441         err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1442         if (err)
1443                 return err;
1444
1445         mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1446         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1447         if (err)
1448                 goto err_event_trap_set;
1449
1450         return 0;
1451
1452 err_event_trap_set:
1453         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1454         return err;
1455 }
1456
1457 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1458                                       enum mlxsw_event_trap_id trap_id)
1459 {
1460         struct mlxsw_event_listener *el;
1461
1462         switch (trap_id) {
1463         case MLXSW_TRAP_ID_PUDE:
1464                 el = &mlxsw_sp_pude_event;
1465                 break;
1466         }
1467         mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1468 }
1469
/* RX handler for trapped control packets: attribute the skb to the
 * receiving port's netdev, account it in per-CPU stats and hand it to
 * the network stack.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	/* Traps can race with port removal; ratelimit to avoid log floods. */
	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
1494
/* Control-plane packet types trapped to the CPU. All share the same
 * handler and match any ingress port; the trap IDs select the packet
 * classes (STP/LACP/LLDP/IGMP/...) the device punts to software.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
1568
/* Set up the RX and CTRL trap groups, then register every listener in
 * mlxsw_sp_rx_listener[] and program its trap action to TRAP_TO_CPU.
 * On failure mid-loop, the partially-configured entries are unwound:
 * first the listener that failed at the HPKT step, then — in reverse —
 * every fully-configured predecessor (action restored to FORWARD
 * before unregistering).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Entry i was registered but its trap action failed: unregister it
	 * here, then fall through to unwind entries 0..i-1. */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
1617
1618 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
1619 {
1620         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1621         int i;
1622
1623         for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1624                 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1625                                     mlxsw_sp_rx_listener[i].trap_id);
1626                 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1627
1628                 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1629                                                   &mlxsw_sp_rx_listener[i],
1630                                                   mlxsw_sp);
1631         }
1632 }
1633
1634 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1635                                  enum mlxsw_reg_sfgc_type type,
1636                                  enum mlxsw_reg_sfgc_bridge_type bridge_type)
1637 {
1638         enum mlxsw_flood_table_type table_type;
1639         enum mlxsw_sp_flood_table flood_table;
1640         char sfgc_pl[MLXSW_REG_SFGC_LEN];
1641
1642         if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
1643                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
1644                 flood_table = 0;
1645         } else {
1646                 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
1647                 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1648                         flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1649                 else
1650                         flood_table = MLXSW_SP_FLOOD_TABLE_BM;
1651         }
1652
1653         mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1654                             flood_table);
1655         return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
1656 }
1657
1658 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1659 {
1660         int type, err;
1661
1662         /* For non-offloaded netdevs, flood all traffic types to CPU
1663          * port.
1664          */
1665         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1666                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1667                         continue;
1668
1669                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1670                                             MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1671                 if (err)
1672                         return err;
1673         }
1674
1675         /* For bridged ports, use one flooding table for unknown unicast
1676          * traffic and a second table for unregistered multicast and
1677          * broadcast.
1678          */
1679         for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1680                 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1681                         continue;
1682
1683                 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1684                                             MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
1685                 if (err)
1686                         return err;
1687         }
1688
1689         return 0;
1690 }
1691
/* Driver-core init callback: bring up the Spectrum ASIC private state in
 * order — base MAC, ports, PUDE link events, packet traps, flood tables,
 * shared buffers, switchdev — unwinding in reverse on any failure.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	/* PUDE = port up/down events from the device. */
	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

	/* Unwind ladder: each label undoes everything completed before the
	 * failed step.  flood/buffers init have no dedicated teardown here,
	 * so their labels fall straight through to traps_fini.
	 */
err_switchdev_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
err_ports_create:
	/* NOTE(review): vfids_fini is called even though no vfids_init
	 * appears in this function — presumably the vFID state is set up
	 * by ports_create or before init; confirm against the rest of the
	 * file.
	 */
	mlxsw_sp_vfids_fini(mlxsw_sp);
	return err;
}
1757
1758 static void mlxsw_sp_fini(void *priv)
1759 {
1760         struct mlxsw_sp *mlxsw_sp = priv;
1761
1762         mlxsw_sp_switchdev_fini(mlxsw_sp);
1763         mlxsw_sp_traps_fini(mlxsw_sp);
1764         mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1765         mlxsw_sp_ports_remove(mlxsw_sp);
1766         mlxsw_sp_vfids_fini(mlxsw_sp);
1767 }
1768
/* Device configuration profile handed to mlxsw_core at init.  Each
 * used_* flag tells the firmware the paired value below it is valid.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	/* Two FID-offset tables (UC and BM — see mlxsw_sp_flood_init)
	 * plus one per-FID table for vFIDs.
	 */
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 1,
	.fid_flood_table_size		= VLAN_N_VID,
	/* InfiniBand and pkey resources are unused on this Ethernet switch. */
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			/* Single switch partition, Ethernet only. */
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
1804
/* mlxsw core driver registration: binds the Spectrum device kind to this
 * driver's init/fini callbacks, TX-header construction and config profile.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};
1815
1816 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
1817 {
1818         return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
1819 }
1820
1821 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
1822 {
1823         struct net_device *dev = mlxsw_sp_port->dev;
1824         int err;
1825
1826         /* When port is not bridged untagged packets are tagged with
1827          * PVID=VID=1, thereby creating an implicit VLAN interface in
1828          * the device. Remove it and let bridge code take care of its
1829          * own VLANs.
1830          */
1831         err = mlxsw_sp_port_kill_vid(dev, 0, 1);
1832         if (err)
1833                 netdev_err(dev, "Failed to remove VID 1\n");
1834
1835         return err;
1836 }
1837
1838 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
1839 {
1840         struct net_device *dev = mlxsw_sp_port->dev;
1841         int err;
1842
1843         /* Add implicit VLAN interface in the device, so that untagged
1844          * packets will be classified to the default vFID.
1845          */
1846         err = mlxsw_sp_port_add_vid(dev, 0, 1);
1847         if (err)
1848                 netdev_err(dev, "Failed to add VID 1\n");
1849
1850         return err;
1851 }
1852
1853 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
1854                                          struct net_device *br_dev)
1855 {
1856         return !mlxsw_sp->master_bridge.dev ||
1857                mlxsw_sp->master_bridge.dev == br_dev;
1858 }
1859
1860 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
1861                                        struct net_device *br_dev)
1862 {
1863         mlxsw_sp->master_bridge.dev = br_dev;
1864         mlxsw_sp->master_bridge.ref_count++;
1865 }
1866
1867 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
1868                                        struct net_device *br_dev)
1869 {
1870         if (--mlxsw_sp->master_bridge.ref_count == 0)
1871                 mlxsw_sp->master_bridge.dev = NULL;
1872 }
1873
/* Netdev notifier callback: track bridge membership of our ports.
 * Vetoes enslavement to a second bridge at PRECHANGEUPPER and performs
 * the join/leave bookkeeping at CHANGEUPPER.  Events for netdevs that are
 * not mlxsw_sp ports are ignored (NOTIFY_DONE).
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	if (!mlxsw_sp_port_dev_check(dev))
		return NOTIFY_DONE;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (info->master && info->linking &&
		    netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->master &&
		    netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				/* NOTE(review): on join failure the port is
				 * still marked bridged and the refcount is
				 * still taken — only a log is emitted.
				 * Looks deliberate (best effort) but worth
				 * confirming.
				 */
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = 1;
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = 0;
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
1923
/* Notifier block hooking mlxsw_sp_netdevice_event() into netdev events. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
1927
1928 static int __init mlxsw_sp_module_init(void)
1929 {
1930         int err;
1931
1932         register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1933         err = mlxsw_core_driver_register(&mlxsw_sp_driver);
1934         if (err)
1935                 goto err_core_driver_register;
1936         return 0;
1937
1938 err_core_driver_register:
1939         unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1940         return err;
1941 }
1942
/* Module exit: unregister the driver, then the netdevice notifier —
 * reverse of mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
1948
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata; the device alias lets udev autoload this driver when a
 * Spectrum device is detected by mlxsw core.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);