GNU Linux-libre 4.19.264-gnu1
net/dcb/dcbnl.c
1 /*
2  * Copyright (c) 2008-2011, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * Description: Data Center Bridging netlink interface
17  * Author: Lucy Liu <lucy.liu@intel.com>
18  */
19
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/init.h>
29 #include <net/sock.h>
30
31 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
32  * intended to allow network traffic with differing requirements
33  * (highly reliable, no drops vs. best effort vs. low latency) to operate
34  * and co-exist on Ethernet.  Current DCB features are:
35  *
36  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
37  *   framework for assigning bandwidth guarantees to traffic classes.
38  *
39  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
40  *   can work independently for each 802.1p priority.
41  *
42  * Congestion Notification - provides a mechanism for end-to-end congestion
43  *   control for protocols which do not have built-in congestion management.
44  *
45  * More information about the emerging standards for these Ethernet features
46  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
47  *
48  * This file implements an rtnetlink interface to allow configuration of DCB
49  * features for capable devices.
50  */
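/* Drivers opt in to this interface by filling a struct dcbnl_rtnl_ops and
 * pointing netdev->dcbnl_ops at it; the handlers below then dispatch
 * RTM_GETDCB/RTM_SETDCB requests to those callbacks.  A minimal sketch,
 * using a hypothetical "foo" driver (illustrative only):
 *
 *	static u8 foo_getstate(struct net_device *dev)
 *	{
 *		struct foo_priv *fp = netdev_priv(dev);
 *
 *		return fp->dcb_enabled;
 *	}
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate	= foo_getstate,
 *		(further callbacks as supported by the hardware)
 *	};
 *
 *	and at probe time:  netdev->dcbnl_ops = &foo_dcbnl_ops;
 */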
51
52 /**************** DCB attribute policies *************************************/
53
54 /* DCB netlink attributes policy */
55 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
56         [DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
57         [DCB_ATTR_STATE]       = {.type = NLA_U8},
58         [DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
59         [DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
60         [DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
61         [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
62         [DCB_ATTR_CAP]         = {.type = NLA_NESTED},
63         [DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
64         [DCB_ATTR_BCN]         = {.type = NLA_NESTED},
65         [DCB_ATTR_APP]         = {.type = NLA_NESTED},
66         [DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
67         [DCB_ATTR_DCBX]        = {.type = NLA_U8},
68         [DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
69 };
70
71 /* DCB priority flow control to User Priority nested attributes */
72 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
73         [DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
74         [DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
75         [DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
76         [DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
77         [DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
78         [DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
79         [DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
80         [DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
81         [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
82 };
83
84 /* DCB priority grouping nested attributes */
85 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
86         [DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
87         [DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
88         [DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
89         [DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
90         [DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
91         [DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
92         [DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
93         [DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
94         [DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
95         [DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
96         [DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
97         [DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
98         [DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
99         [DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
100         [DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
101         [DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
102         [DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
103         [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
104 };
105
106 /* DCB traffic class nested attributes. */
107 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
108         [DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
109         [DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
110         [DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
111         [DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
112         [DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
113 };
114
115 /* DCB capabilities nested attributes. */
116 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
117         [DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
118         [DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
119         [DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
120         [DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
121         [DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
122         [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
123         [DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
124         [DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
125         [DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
126 };
127
128 /* DCB number of traffic classes nested attributes. */
129 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
130         [DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
131         [DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
132         [DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
133 };
134
135 /* DCB BCN nested attributes. */
136 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
137         [DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
138         [DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
139         [DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
140         [DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
141         [DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
142         [DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
143         [DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
144         [DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
145         [DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
146         [DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
147         [DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
148         [DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
149         [DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
150         [DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
151         [DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
152         [DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
153         [DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
154         [DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
155         [DCB_BCN_ATTR_W]            = {.type = NLA_U32},
156         [DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
157         [DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
158         [DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
159         [DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
160         [DCB_BCN_ATTR_C]            = {.type = NLA_U32},
161         [DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
162 };
163
164 /* DCB APP nested attributes. */
165 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
166         [DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
167         [DCB_APP_ATTR_ID]           = {.type = NLA_U16},
168         [DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
169 };
170
171 /* IEEE 802.1Qaz nested attributes. */
172 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
173         [DCB_ATTR_IEEE_ETS]         = {.len = sizeof(struct ieee_ets)},
174         [DCB_ATTR_IEEE_PFC]         = {.len = sizeof(struct ieee_pfc)},
175         [DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
176         [DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
177         [DCB_ATTR_IEEE_QCN]         = {.len = sizeof(struct ieee_qcn)},
178         [DCB_ATTR_IEEE_QCN_STATS]   = {.len = sizeof(struct ieee_qcn_stats)},
179         [DCB_ATTR_DCB_BUFFER]       = {.len = sizeof(struct dcbnl_buffer)},
180 };
181
182 /* DCB feature configuration nested attributes. */
183 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
184         [DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
185         [DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
186         [DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
187         [DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
188 };
189
190 static LIST_HEAD(dcb_app_list);
191 static DEFINE_SPINLOCK(dcb_lock);
192
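/* Allocate a DCB netlink skb and write the struct dcbmsg header into it.
 * @type is the netlink message type (RTM_GETDCB/RTM_SETDCB), @cmd the
 * DCB_CMD_* value, and @port, @seq and @flags the usual netlink addressing;
 * if @nlhp is non-NULL it receives the message header so the caller can
 * finish the message with nlmsg_end().  Returns NULL on allocation failure.
 */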
193 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
194                                     u32 flags, struct nlmsghdr **nlhp)
195 {
196         struct sk_buff *skb;
197         struct dcbmsg *dcb;
198         struct nlmsghdr *nlh;
199
200         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
201         if (!skb)
202                 return NULL;
203
204         nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
205         BUG_ON(!nlh);
206
207         dcb = nlmsg_data(nlh);
208         dcb->dcb_family = AF_UNSPEC;
209         dcb->cmd = cmd;
210         dcb->dcb_pad = 0;
211
212         if (nlhp)
213                 *nlhp = nlh;
214
215         return skb;
216 }
217
218 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
219                           u32 seq, struct nlattr **tb, struct sk_buff *skb)
220 {
221         /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
222         if (!netdev->dcbnl_ops->getstate)
223                 return -EOPNOTSUPP;
224
225         return nla_put_u8(skb, DCB_ATTR_STATE,
226                           netdev->dcbnl_ops->getstate(netdev));
227 }
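/* From user space this maps onto an RTM_GETDCB request whose payload is a
 * struct dcbmsg followed by a DCB_ATTR_IFNAME attribute; the reply carries
 * the result as a DCB_ATTR_STATE u8.  A minimal sketch over a raw
 * NETLINK_ROUTE socket, using only the uapi headers (<linux/netlink.h>,
 * <linux/rtnetlink.h>, <linux/dcbnl.h>); illustrative only, error handling
 * omitted:
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct dcbmsg   d;
 *		char            buf[256];
 *	} req = { 0 };
 *	struct rtattr *rta;
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	req.n.nlmsg_len   = NLMSG_LENGTH(sizeof(req.d));
 *	req.n.nlmsg_type  = RTM_GETDCB;
 *	req.n.nlmsg_flags = NLM_F_REQUEST;
 *	req.d.dcb_family  = AF_UNSPEC;
 *	req.d.cmd         = DCB_CMD_GSTATE;
 *
 *	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.n.nlmsg_len));
 *	rta->rta_type = DCB_ATTR_IFNAME;
 *	rta->rta_len  = RTA_LENGTH(IFNAMSIZ);
 *	strcpy(RTA_DATA(rta), "eth0");
 *	req.n.nlmsg_len = NLMSG_ALIGN(req.n.nlmsg_len) + RTA_ALIGN(rta->rta_len);
 *
 *	send(fd, &req, req.n.nlmsg_len, 0);
 *	(then recv() the reply and walk its attributes for DCB_ATTR_STATE)
 */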
228
229 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
230                            u32 seq, struct nlattr **tb, struct sk_buff *skb)
231 {
232         struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
233         u8 value;
234         int ret;
235         int i;
236         int getall = 0;
237
238         if (!tb[DCB_ATTR_PFC_CFG])
239                 return -EINVAL;
240
241         if (!netdev->dcbnl_ops->getpfccfg)
242                 return -EOPNOTSUPP;
243
244         ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
245                                tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
246         if (ret)
247                 return ret;
248
249         nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
250         if (!nest)
251                 return -EMSGSIZE;
252
253         if (data[DCB_PFC_UP_ATTR_ALL])
254                 getall = 1;
255
256         for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
257                 if (!getall && !data[i])
258                         continue;
259
260                 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
261                                              &value);
262                 ret = nla_put_u8(skb, i, value);
263                 if (ret) {
264                         nla_nest_cancel(skb, nest);
265                         return ret;
266                 }
267         }
268         nla_nest_end(skb, nest);
269
270         return 0;
271 }
272
273 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
274                                 u32 seq, struct nlattr **tb, struct sk_buff *skb)
275 {
276         u8 perm_addr[MAX_ADDR_LEN];
277
278         if (!netdev->dcbnl_ops->getpermhwaddr)
279                 return -EOPNOTSUPP;
280
281         memset(perm_addr, 0, sizeof(perm_addr));
282         netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
283
284         return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
285 }
286
287 static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
288                         u32 seq, struct nlattr **tb, struct sk_buff *skb)
289 {
290         struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
291         u8 value;
292         int ret;
293         int i;
294         int getall = 0;
295
296         if (!tb[DCB_ATTR_CAP])
297                 return -EINVAL;
298
299         if (!netdev->dcbnl_ops->getcap)
300                 return -EOPNOTSUPP;
301
302         ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
303                                dcbnl_cap_nest, NULL);
304         if (ret)
305                 return ret;
306
307         nest = nla_nest_start(skb, DCB_ATTR_CAP);
308         if (!nest)
309                 return -EMSGSIZE;
310
311         if (data[DCB_CAP_ATTR_ALL])
312                 getall = 1;
313
314         for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
315                 if (!getall && !data[i])
316                         continue;
317
318                 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
319                         ret = nla_put_u8(skb, i, value);
320                         if (ret) {
321                                 nla_nest_cancel(skb, nest);
322                                 return ret;
323                         }
324                 }
325         }
326         nla_nest_end(skb, nest);
327
328         return 0;
329 }
330
331 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
332                            u32 seq, struct nlattr **tb, struct sk_buff *skb)
333 {
334         struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
335         u8 value;
336         int ret;
337         int i;
338         int getall = 0;
339
340         if (!tb[DCB_ATTR_NUMTCS])
341                 return -EINVAL;
342
343         if (!netdev->dcbnl_ops->getnumtcs)
344                 return -EOPNOTSUPP;
345
346         ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
347                                dcbnl_numtcs_nest, NULL);
348         if (ret)
349                 return ret;
350
351         nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
352         if (!nest)
353                 return -EMSGSIZE;
354
355         if (data[DCB_NUMTCS_ATTR_ALL])
356                 getall = 1;
357
358         for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
359                 if (!getall && !data[i])
360                         continue;
361
362                 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
363                 if (!ret) {
364                         ret = nla_put_u8(skb, i, value);
365                         if (ret) {
366                                 nla_nest_cancel(skb, nest);
367                                 return ret;
368                         }
369                 } else
370                         return -EINVAL;
371         }
372         nla_nest_end(skb, nest);
373
374         return 0;
375 }
376
377 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
378                            u32 seq, struct nlattr **tb, struct sk_buff *skb)
379 {
380         struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
381         int ret;
382         u8 value;
383         int i;
384
385         if (!tb[DCB_ATTR_NUMTCS])
386                 return -EINVAL;
387
388         if (!netdev->dcbnl_ops->setnumtcs)
389                 return -EOPNOTSUPP;
390
391         ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
392                                dcbnl_numtcs_nest, NULL);
393         if (ret)
394                 return ret;
395
396         for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
397                 if (data[i] == NULL)
398                         continue;
399
400                 value = nla_get_u8(data[i]);
401
402                 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
403                 if (ret)
404                         break;
405         }
406
407         return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
408 }
409
410 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
411                              u32 seq, struct nlattr **tb, struct sk_buff *skb)
412 {
413         if (!netdev->dcbnl_ops->getpfcstate)
414                 return -EOPNOTSUPP;
415
416         return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
417                           netdev->dcbnl_ops->getpfcstate(netdev));
418 }
419
420 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
421                              u32 seq, struct nlattr **tb, struct sk_buff *skb)
422 {
423         u8 value;
424
425         if (!tb[DCB_ATTR_PFC_STATE])
426                 return -EINVAL;
427
428         if (!netdev->dcbnl_ops->setpfcstate)
429                 return -EOPNOTSUPP;
430
431         value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
432
433         netdev->dcbnl_ops->setpfcstate(netdev, value);
434
435         return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
436 }
437
438 static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
439                         u32 seq, struct nlattr **tb, struct sk_buff *skb)
440 {
441         struct nlattr *app_nest;
442         struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
443         u16 id;
444         u8 up, idtype;
445         int ret;
446
447         if (!tb[DCB_ATTR_APP])
448                 return -EINVAL;
449
450         ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
451                                dcbnl_app_nest, NULL);
452         if (ret)
453                 return ret;
454
455         /* all must be non-null */
456         if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
457             (!app_tb[DCB_APP_ATTR_ID]))
458                 return -EINVAL;
459
460         /* either by ethertype or by TCP/UDP port number */
461         idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
462         if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
463             (idtype != DCB_APP_IDTYPE_PORTNUM))
464                 return -EINVAL;
465
466         id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
467
468         if (netdev->dcbnl_ops->getapp) {
469                 ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
470                 if (ret < 0)
471                         return ret;
472                 else
473                         up = ret;
474         } else {
475                 struct dcb_app app = {
476                                         .selector = idtype,
477                                         .protocol = id,
478                                      };
479                 up = dcb_getapp(netdev, &app);
480         }
481
482         app_nest = nla_nest_start(skb, DCB_ATTR_APP);
483         if (!app_nest)
484                 return -EMSGSIZE;
485
486         ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
487         if (ret)
488                 goto out_cancel;
489
490         ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
491         if (ret)
492                 goto out_cancel;
493
494         ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
495         if (ret)
496                 goto out_cancel;
497
498         nla_nest_end(skb, app_nest);
499
500         return 0;
501
502 out_cancel:
503         nla_nest_cancel(skb, app_nest);
504         return ret;
505 }
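/* When a driver does not implement ->getapp()/->setapp(), the CEE app
 * entries are kept in the shared dcb_app_list above and accessed via
 * dcb_getapp()/dcb_setapp().  A minimal driver-side lookup might read
 * (illustrative only):
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,
 *	};
 *	u8 prio = dcb_getapp(netdev, &app);
 */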
506
507 static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
508                         u32 seq, struct nlattr **tb, struct sk_buff *skb)
509 {
510         int ret;
511         u16 id;
512         u8 up, idtype;
513         struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
514
515         if (!tb[DCB_ATTR_APP])
516                 return -EINVAL;
517
518         ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
519                                dcbnl_app_nest, NULL);
520         if (ret)
521                 return ret;
522
523         /* all must be non-null */
524         if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
525             (!app_tb[DCB_APP_ATTR_ID]) ||
526             (!app_tb[DCB_APP_ATTR_PRIORITY]))
527                 return -EINVAL;
528
529         /* either by ethertype or by TCP/UDP port number */
530         idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
531         if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
532             (idtype != DCB_APP_IDTYPE_PORTNUM))
533                 return -EINVAL;
534
535         id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
536         up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
537
538         if (netdev->dcbnl_ops->setapp) {
539                 ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
540                 if (ret < 0)
541                         return ret;
542         } else {
543                 struct dcb_app app;
544                 app.selector = idtype;
545                 app.protocol = id;
546                 app.priority = up;
547                 ret = dcb_setapp(netdev, &app);
548         }
549
550         ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
551         dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
552
553         return ret;
554 }
555
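/* Common worker for the CEE priority-group GET commands; @dir selects the
 * direction (0 = Tx, 1 = Rx) and thus whether the *cfgtx or *cfgrx
 * callbacks are queried.
 */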
556 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
557                              struct nlattr **tb, struct sk_buff *skb, int dir)
558 {
559         struct nlattr *pg_nest, *param_nest, *data;
560         struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
561         struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
562         u8 prio, pgid, tc_pct, up_map;
563         int ret;
564         int getall = 0;
565         int i;
566
567         if (!tb[DCB_ATTR_PG_CFG])
568                 return -EINVAL;
569
570         if (!netdev->dcbnl_ops->getpgtccfgtx ||
571             !netdev->dcbnl_ops->getpgtccfgrx ||
572             !netdev->dcbnl_ops->getpgbwgcfgtx ||
573             !netdev->dcbnl_ops->getpgbwgcfgrx)
574                 return -EOPNOTSUPP;
575
576         ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
577                                dcbnl_pg_nest, NULL);
578         if (ret)
579                 return ret;
580
581         pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
582         if (!pg_nest)
583                 return -EMSGSIZE;
584
585         if (pg_tb[DCB_PG_ATTR_TC_ALL])
586                 getall = 1;
587
588         for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
589                 if (!getall && !pg_tb[i])
590                         continue;
591
592                 if (pg_tb[DCB_PG_ATTR_TC_ALL])
593                         data = pg_tb[DCB_PG_ATTR_TC_ALL];
594                 else
595                         data = pg_tb[i];
596                 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data,
597                                        dcbnl_tc_param_nest, NULL);
598                 if (ret)
599                         goto err_pg;
600
601                 param_nest = nla_nest_start(skb, i);
602                 if (!param_nest)
603                         goto err_pg;
604
605                 pgid = DCB_ATTR_VALUE_UNDEFINED;
606                 prio = DCB_ATTR_VALUE_UNDEFINED;
607                 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
608                 up_map = DCB_ATTR_VALUE_UNDEFINED;
609
610                 if (dir) {
611                         /* Rx */
612                         netdev->dcbnl_ops->getpgtccfgrx(netdev,
613                                                 i - DCB_PG_ATTR_TC_0, &prio,
614                                                 &pgid, &tc_pct, &up_map);
615                 } else {
616                         /* Tx */
617                         netdev->dcbnl_ops->getpgtccfgtx(netdev,
618                                                 i - DCB_PG_ATTR_TC_0, &prio,
619                                                 &pgid, &tc_pct, &up_map);
620                 }
621
622                 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
623                     param_tb[DCB_TC_ATTR_PARAM_ALL]) {
624                         ret = nla_put_u8(skb,
625                                          DCB_TC_ATTR_PARAM_PGID, pgid);
626                         if (ret)
627                                 goto err_param;
628                 }
629                 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
630                     param_tb[DCB_TC_ATTR_PARAM_ALL]) {
631                         ret = nla_put_u8(skb,
632                                          DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
633                         if (ret)
634                                 goto err_param;
635                 }
636                 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
637                     param_tb[DCB_TC_ATTR_PARAM_ALL]) {
638                         ret = nla_put_u8(skb,
639                                          DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
640                         if (ret)
641                                 goto err_param;
642                 }
643                 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
644                     param_tb[DCB_TC_ATTR_PARAM_ALL]) {
645                         ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
646                                          tc_pct);
647                         if (ret)
648                                 goto err_param;
649                 }
650                 nla_nest_end(skb, param_nest);
651         }
652
653         if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
654                 getall = 1;
655         else
656                 getall = 0;
657
658         for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
659                 if (!getall && !pg_tb[i])
660                         continue;
661
662                 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
663
664                 if (dir) {
665                         /* Rx */
666                         netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
667                                         i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
668                 } else {
669                         /* Tx */
670                         netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
671                                         i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
672                 }
673                 ret = nla_put_u8(skb, i, tc_pct);
674                 if (ret)
675                         goto err_pg;
676         }
677
678         nla_nest_end(skb, pg_nest);
679
680         return 0;
681
682 err_param:
683         nla_nest_cancel(skb, param_nest);
684 err_pg:
685         nla_nest_cancel(skb, pg_nest);
686
687         return -EMSGSIZE;
688 }
689
690 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
691                              u32 seq, struct nlattr **tb, struct sk_buff *skb)
692 {
693         return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
694 }
695
696 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
697                              u32 seq, struct nlattr **tb, struct sk_buff *skb)
698 {
699         return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
700 }
701
702 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
703                           u32 seq, struct nlattr **tb, struct sk_buff *skb)
704 {
705         u8 value;
706
707         if (!tb[DCB_ATTR_STATE])
708                 return -EINVAL;
709
710         if (!netdev->dcbnl_ops->setstate)
711                 return -EOPNOTSUPP;
712
713         value = nla_get_u8(tb[DCB_ATTR_STATE]);
714
715         return nla_put_u8(skb, DCB_ATTR_STATE,
716                           netdev->dcbnl_ops->setstate(netdev, value));
717 }
718
719 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
720                            u32 seq, struct nlattr **tb, struct sk_buff *skb)
721 {
722         struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
723         int i;
724         int ret;
725         u8 value;
726
727         if (!tb[DCB_ATTR_PFC_CFG])
728                 return -EINVAL;
729
730         if (!netdev->dcbnl_ops->setpfccfg)
731                 return -EOPNOTSUPP;
732
733         ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
734                                tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
735         if (ret)
736                 return ret;
737
738         for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
739                 if (data[i] == NULL)
740                         continue;
741                 value = nla_get_u8(data[i]);
742                 netdev->dcbnl_ops->setpfccfg(netdev,
743                         data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
744         }
745
746         return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
747 }
748
749 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
750                         u32 seq, struct nlattr **tb, struct sk_buff *skb)
751 {
752         int ret;
753
754         if (!tb[DCB_ATTR_SET_ALL])
755                 return -EINVAL;
756
757         if (!netdev->dcbnl_ops->setall)
758                 return -EOPNOTSUPP;
759
760         ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
761                          netdev->dcbnl_ops->setall(netdev));
762         dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
763
764         return ret;
765 }
766
767 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
768                              u32 seq, struct nlattr **tb, struct sk_buff *skb,
769                              int dir)
770 {
771         struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
772         struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
773         int ret;
774         int i;
775         u8 pgid;
776         u8 up_map;
777         u8 prio;
778         u8 tc_pct;
779
780         if (!tb[DCB_ATTR_PG_CFG])
781                 return -EINVAL;
782
783         if (!netdev->dcbnl_ops->setpgtccfgtx ||
784             !netdev->dcbnl_ops->setpgtccfgrx ||
785             !netdev->dcbnl_ops->setpgbwgcfgtx ||
786             !netdev->dcbnl_ops->setpgbwgcfgrx)
787                 return -EOPNOTSUPP;
788
789         ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
790                                dcbnl_pg_nest, NULL);
791         if (ret)
792                 return ret;
793
794         for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
795                 if (!pg_tb[i])
796                         continue;
797
798                 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
799                                        pg_tb[i], dcbnl_tc_param_nest, NULL);
800                 if (ret)
801                         return ret;
802
803                 pgid = DCB_ATTR_VALUE_UNDEFINED;
804                 prio = DCB_ATTR_VALUE_UNDEFINED;
805                 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
806                 up_map = DCB_ATTR_VALUE_UNDEFINED;
807
808                 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
809                         prio =
810                             nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
811
812                 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
813                         pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
814
815                 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
816                         tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
817
818                 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
819                         up_map =
820                              nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
821
822                 /* dir: Tx = 0, Rx = 1 */
823                 if (dir) {
824                         /* Rx */
825                         netdev->dcbnl_ops->setpgtccfgrx(netdev,
826                                 i - DCB_PG_ATTR_TC_0,
827                                 prio, pgid, tc_pct, up_map);
828                 } else {
829                         /* Tx */
830                         netdev->dcbnl_ops->setpgtccfgtx(netdev,
831                                 i - DCB_PG_ATTR_TC_0,
832                                 prio, pgid, tc_pct, up_map);
833                 }
834         }
835
836         for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
837                 if (!pg_tb[i])
838                         continue;
839
840                 tc_pct = nla_get_u8(pg_tb[i]);
841
842                 /* dir: Tx = 0, Rx = 1 */
843                 if (dir) {
844                         /* Rx */
845                         netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
846                                          i - DCB_PG_ATTR_BW_ID_0, tc_pct);
847                 } else {
848                         /* Tx */
849                         netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
850                                          i - DCB_PG_ATTR_BW_ID_0, tc_pct);
851                 }
852         }
853
854         return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
855 }
856
857 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
858                              u32 seq, struct nlattr **tb, struct sk_buff *skb)
859 {
860         return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
861 }
862
863 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
864                              u32 seq, struct nlattr **tb, struct sk_buff *skb)
865 {
866         return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
867 }
868
869 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
870                             u32 seq, struct nlattr **tb, struct sk_buff *skb)
871 {
872         struct nlattr *bcn_nest;
873         struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
874         u8 value_byte;
875         u32 value_integer;
876         int ret;
877         bool getall = false;
878         int i;
879
880         if (!tb[DCB_ATTR_BCN])
881                 return -EINVAL;
882
883         if (!netdev->dcbnl_ops->getbcnrp ||
884             !netdev->dcbnl_ops->getbcncfg)
885                 return -EOPNOTSUPP;
886
887         ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
888                                dcbnl_bcn_nest, NULL);
889         if (ret)
890                 return ret;
891
892         bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
893         if (!bcn_nest)
894                 return -EMSGSIZE;
895
896         if (bcn_tb[DCB_BCN_ATTR_ALL])
897                 getall = true;
898
899         for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
900                 if (!getall && !bcn_tb[i])
901                         continue;
902
903                 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
904                                             &value_byte);
905                 ret = nla_put_u8(skb, i, value_byte);
906                 if (ret)
907                         goto err_bcn;
908         }
909
910         for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
911                 if (!getall && !bcn_tb[i])
912                         continue;
913
914                 netdev->dcbnl_ops->getbcncfg(netdev, i,
915                                              &value_integer);
916                 ret = nla_put_u32(skb, i, value_integer);
917                 if (ret)
918                         goto err_bcn;
919         }
920
921         nla_nest_end(skb, bcn_nest);
922
923         return 0;
924
925 err_bcn:
926         nla_nest_cancel(skb, bcn_nest);
927         return ret;
928 }
929
930 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
931                             u32 seq, struct nlattr **tb, struct sk_buff *skb)
932 {
933         struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
934         int i;
935         int ret;
936         u8 value_byte;
937         u32 value_int;
938
939         if (!tb[DCB_ATTR_BCN])
940                 return -EINVAL;
941
942         if (!netdev->dcbnl_ops->setbcncfg ||
943             !netdev->dcbnl_ops->setbcnrp)
944                 return -EOPNOTSUPP;
945
946         ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
947                                dcbnl_pfc_up_nest, NULL);
948         if (ret)
949                 return ret;
950
951         for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
952                 if (data[i] == NULL)
953                         continue;
954                 value_byte = nla_get_u8(data[i]);
955                 netdev->dcbnl_ops->setbcnrp(netdev,
956                         data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
957         }
958
959         for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
960                 if (data[i] == NULL)
961                         continue;
962                 value_int = nla_get_u32(data[i]);
963                 netdev->dcbnl_ops->setbcncfg(netdev,
964                                              i, value_int);
965         }
966
967         return nla_put_u8(skb, DCB_ATTR_BCN, 0);
968 }
969
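/* Query the peer's application table from the driver and emit it as a
 * nested attribute.  @app_nested_type is the enclosing nest type,
 * @app_info_type the (optional) type for the struct dcb_peer_app_info
 * header, and @app_entry_type the type used for each struct dcb_app entry.
 * Shared by the IEEE and CEE peer-app dumps below.
 */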
970 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
971                                 int app_nested_type, int app_info_type,
972                                 int app_entry_type)
973 {
974         struct dcb_peer_app_info info;
975         struct dcb_app *table = NULL;
976         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
977         u16 app_count;
978         int err;
979
980
981         /*
982          * Retrieve the peer app configuration from the driver.  If the
983          * driver handlers fail, exit without doing anything.
984          */
985         err = ops->peer_getappinfo(netdev, &info, &app_count);
986         if (!err && app_count) {
987                 table = kmalloc_array(app_count, sizeof(struct dcb_app),
988                                       GFP_KERNEL);
989                 if (!table)
990                         return -ENOMEM;
991
992                 err = ops->peer_getapptable(netdev, table);
993         }
994
995         if (!err) {
996                 u16 i;
997                 struct nlattr *app;
998
999                 /*
1000                  * Build the message; from here on the only possible
1001                  * failure is due to the skb size.
1002                  */
1003                 err = -EMSGSIZE;
1004
1005                 app = nla_nest_start(skb, app_nested_type);
1006                 if (!app)
1007                         goto nla_put_failure;
1008
1009                 if (app_info_type &&
1010                     nla_put(skb, app_info_type, sizeof(info), &info))
1011                         goto nla_put_failure;
1012
1013                 for (i = 0; i < app_count; i++) {
1014                         if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1015                                     &table[i]))
1016                                 goto nla_put_failure;
1017                 }
1018                 nla_nest_end(skb, app);
1019         }
1020         err = 0;
1021
1022 nla_put_failure:
1023         kfree(table);
1024         return err;
1025 }
1026
1027 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
1028 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1029 {
1030         struct nlattr *ieee, *app;
1031         struct dcb_app_type *itr;
1032         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1033         int dcbx;
1034         int err;
1035
1036         if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1037                 return -EMSGSIZE;
1038
1039         ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1040         if (!ieee)
1041                 return -EMSGSIZE;
1042
1043         if (ops->ieee_getets) {
1044                 struct ieee_ets ets;
1045                 memset(&ets, 0, sizeof(ets));
1046                 err = ops->ieee_getets(netdev, &ets);
1047                 if (!err &&
1048                     nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1049                         return -EMSGSIZE;
1050         }
1051
1052         if (ops->ieee_getmaxrate) {
1053                 struct ieee_maxrate maxrate;
1054                 memset(&maxrate, 0, sizeof(maxrate));
1055                 err = ops->ieee_getmaxrate(netdev, &maxrate);
1056                 if (!err) {
1057                         err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1058                                       sizeof(maxrate), &maxrate);
1059                         if (err)
1060                                 return -EMSGSIZE;
1061                 }
1062         }
1063
1064         if (ops->ieee_getqcn) {
1065                 struct ieee_qcn qcn;
1066
1067                 memset(&qcn, 0, sizeof(qcn));
1068                 err = ops->ieee_getqcn(netdev, &qcn);
1069                 if (!err) {
1070                         err = nla_put(skb, DCB_ATTR_IEEE_QCN,
1071                                       sizeof(qcn), &qcn);
1072                         if (err)
1073                                 return -EMSGSIZE;
1074                 }
1075         }
1076
1077         if (ops->ieee_getqcnstats) {
1078                 struct ieee_qcn_stats qcn_stats;
1079
1080                 memset(&qcn_stats, 0, sizeof(qcn_stats));
1081                 err = ops->ieee_getqcnstats(netdev, &qcn_stats);
1082                 if (!err) {
1083                         err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
1084                                       sizeof(qcn_stats), &qcn_stats);
1085                         if (err)
1086                                 return -EMSGSIZE;
1087                 }
1088         }
1089
1090         if (ops->ieee_getpfc) {
1091                 struct ieee_pfc pfc;
1092                 memset(&pfc, 0, sizeof(pfc));
1093                 err = ops->ieee_getpfc(netdev, &pfc);
1094                 if (!err &&
1095                     nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1096                         return -EMSGSIZE;
1097         }
1098
1099         if (ops->dcbnl_getbuffer) {
1100                 struct dcbnl_buffer buffer;
1101
1102                 memset(&buffer, 0, sizeof(buffer));
1103                 err = ops->dcbnl_getbuffer(netdev, &buffer);
1104                 if (!err &&
1105                     nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
1106                         return -EMSGSIZE;
1107         }
1108
1109         app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
1110         if (!app)
1111                 return -EMSGSIZE;
1112
1113         spin_lock_bh(&dcb_lock);
1114         list_for_each_entry(itr, &dcb_app_list, list) {
1115                 if (itr->ifindex == netdev->ifindex) {
1116                         err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1117                                          &itr->app);
1118                         if (err) {
1119                                 spin_unlock_bh(&dcb_lock);
1120                                 return -EMSGSIZE;
1121                         }
1122                 }
1123         }
1124
1125         if (netdev->dcbnl_ops->getdcbx)
1126                 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1127         else
1128                 dcbx = -EOPNOTSUPP;
1129
1130         spin_unlock_bh(&dcb_lock);
1131         nla_nest_end(skb, app);
1132
1133         /* get peer info if available */
1134         if (ops->ieee_peer_getets) {
1135                 struct ieee_ets ets;
1136                 memset(&ets, 0, sizeof(ets));
1137                 err = ops->ieee_peer_getets(netdev, &ets);
1138                 if (!err &&
1139                     nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1140                         return -EMSGSIZE;
1141         }
1142
1143         if (ops->ieee_peer_getpfc) {
1144                 struct ieee_pfc pfc;
1145                 memset(&pfc, 0, sizeof(pfc));
1146                 err = ops->ieee_peer_getpfc(netdev, &pfc);
1147                 if (!err &&
1148                     nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1149                         return -EMSGSIZE;
1150         }
1151
1152         if (ops->peer_getappinfo && ops->peer_getapptable) {
1153                 err = dcbnl_build_peer_app(netdev, skb,
1154                                            DCB_ATTR_IEEE_PEER_APP,
1155                                            DCB_ATTR_IEEE_APP_UNSPEC,
1156                                            DCB_ATTR_IEEE_APP);
1157                 if (err)
1158                         return -EMSGSIZE;
1159         }
1160
1161         nla_nest_end(skb, ieee);
1162         if (dcbx >= 0) {
1163                 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1164                 if (err)
1165                         return -EMSGSIZE;
1166         }
1167
1168         return 0;
1169 }
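/* The ops->ieee_* getters above hand back the raw IEEE 802.1Qaz structures
 * from <linux/dcbnl.h>.  As a rough sketch (illustrative only, hypothetical
 * "foo" driver), an ieee_getets() callback reporting two ETS classes could
 * look like:
 *
 *	static int foo_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
 *	{
 *		ets->ets_cap     = 2;
 *		ets->tc_tx_bw[0] = 60;
 *		ets->tc_tx_bw[1] = 40;
 *		ets->tc_tsa[0]   = IEEE_8021QAZ_TSA_ETS;
 *		ets->tc_tsa[1]   = IEEE_8021QAZ_TSA_ETS;
 *		ets->prio_tc[3]  = 1;	(map 802.1p priority 3 to TC 1)
 *		return 0;
 *	}
 */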
1170
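/* Fill one CEE priority-group nest; here @dir is 1 for Tx and 0 for Rx,
 * i.e. the opposite convention from __dcbnl_pg_getcfg() above.
 */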
1171 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1172                              int dir)
1173 {
1174         u8 pgid, up_map, prio, tc_pct;
1175         const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1176         int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1177         struct nlattr *pg = nla_nest_start(skb, i);
1178
1179         if (!pg)
1180                 return -EMSGSIZE;
1181
1182         for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1183                 struct nlattr *tc_nest = nla_nest_start(skb, i);
1184
1185                 if (!tc_nest)
1186                         return -EMSGSIZE;
1187
1188                 pgid = DCB_ATTR_VALUE_UNDEFINED;
1189                 prio = DCB_ATTR_VALUE_UNDEFINED;
1190                 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1191                 up_map = DCB_ATTR_VALUE_UNDEFINED;
1192
1193                 if (!dir)
1194                         ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1195                                           &prio, &pgid, &tc_pct, &up_map);
1196                 else
1197                         ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1198                                           &prio, &pgid, &tc_pct, &up_map);
1199
1200                 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1201                     nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1202                     nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1203                     nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1204                         return -EMSGSIZE;
1205                 nla_nest_end(skb, tc_nest);
1206         }
1207
1208         for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1209                 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1210
1211                 if (!dir)
1212                         ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1213                                            &tc_pct);
1214                 else
1215                         ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1216                                            &tc_pct);
1217                 if (nla_put_u8(skb, i, tc_pct))
1218                         return -EMSGSIZE;
1219         }
1220         nla_nest_end(skb, pg);
1221         return 0;
1222 }
1223
1224 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1225 {
1226         struct nlattr *cee, *app;
1227         struct dcb_app_type *itr;
1228         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1229         int dcbx, i, err = -EMSGSIZE;
1230         u8 value;
1231
1232         if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1233                 goto nla_put_failure;
1234         cee = nla_nest_start(skb, DCB_ATTR_CEE);
1235         if (!cee)
1236                 goto nla_put_failure;
1237
1238         /* local pg */
1239         if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1240                 err = dcbnl_cee_pg_fill(skb, netdev, 1);
1241                 if (err)
1242                         goto nla_put_failure;
1243         }
1244
1245         if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1246                 err = dcbnl_cee_pg_fill(skb, netdev, 0);
1247                 if (err)
1248                         goto nla_put_failure;
1249         }
1250
1251         /* local pfc */
1252         if (ops->getpfccfg) {
1253                 struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
1254
1255                 if (!pfc_nest)
1256                         goto nla_put_failure;
1257
1258                 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1259                         ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1260                         if (nla_put_u8(skb, i, value))
1261                                 goto nla_put_failure;
1262                 }
1263                 nla_nest_end(skb, pfc_nest);
1264         }
1265
1266         /* local app */
1267         spin_lock_bh(&dcb_lock);
1268         app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
1269         if (!app)
1270                 goto dcb_unlock;
1271
1272         list_for_each_entry(itr, &dcb_app_list, list) {
1273                 if (itr->ifindex == netdev->ifindex) {
1274                         struct nlattr *app_nest = nla_nest_start(skb,
1275                                                                  DCB_ATTR_APP);
1276                         if (!app_nest)
1277                                 goto dcb_unlock;
1278
1279                         err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1280                                          itr->app.selector);
1281                         if (err)
1282                                 goto dcb_unlock;
1283
1284                         err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1285                                           itr->app.protocol);
1286                         if (err)
1287                                 goto dcb_unlock;
1288
1289                         err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1290                                          itr->app.priority);
1291                         if (err)
1292                                 goto dcb_unlock;
1293
1294                         nla_nest_end(skb, app_nest);
1295                 }
1296         }
1297         nla_nest_end(skb, app);
1298
1299         if (netdev->dcbnl_ops->getdcbx)
1300                 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1301         else
1302                 dcbx = -EOPNOTSUPP;
1303
1304         spin_unlock_bh(&dcb_lock);
1305
1306         /* features flags */
1307         if (ops->getfeatcfg) {
1308                 struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
1309                 if (!feat)
1310                         goto nla_put_failure;
1311
1312                 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1313                      i++)
1314                         if (!ops->getfeatcfg(netdev, i, &value) &&
1315                             nla_put_u8(skb, i, value))
1316                                 goto nla_put_failure;
1317
1318                 nla_nest_end(skb, feat);
1319         }
1320
1321         /* peer info if available */
1322         if (ops->cee_peer_getpg) {
1323                 struct cee_pg pg;
1324                 memset(&pg, 0, sizeof(pg));
1325                 err = ops->cee_peer_getpg(netdev, &pg);
1326                 if (!err &&
1327                     nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1328                         goto nla_put_failure;
1329         }
1330
1331         if (ops->cee_peer_getpfc) {
1332                 struct cee_pfc pfc;
1333                 memset(&pfc, 0, sizeof(pfc));
1334                 err = ops->cee_peer_getpfc(netdev, &pfc);
1335                 if (!err &&
1336                     nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1337                         goto nla_put_failure;
1338         }
1339
1340         if (ops->peer_getappinfo && ops->peer_getapptable) {
1341                 err = dcbnl_build_peer_app(netdev, skb,
1342                                            DCB_ATTR_CEE_PEER_APP_TABLE,
1343                                            DCB_ATTR_CEE_PEER_APP_INFO,
1344                                            DCB_ATTR_CEE_PEER_APP);
1345                 if (err)
1346                         goto nla_put_failure;
1347         }
1348         nla_nest_end(skb, cee);
1349
1350         /* DCBX state */
1351         if (dcbx >= 0) {
1352                 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1353                 if (err)
1354                         goto nla_put_failure;
1355         }
1356         return 0;
1357
1358 dcb_unlock:
1359         spin_unlock_bh(&dcb_lock);
1360 nla_put_failure:
1361         err = -EMSGSIZE;
1362         return err;
1363 }
1364
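/* Build a full IEEE or CEE dump for @dev and multicast it to the
 * RTNLGRP_DCB group.  The exported wrappers dcbnl_ieee_notify() and
 * dcbnl_cee_notify() below let drivers announce configuration changes
 * to interested listeners.
 */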
1365 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1366                         u32 seq, u32 portid, int dcbx_ver)
1367 {
1368         struct net *net = dev_net(dev);
1369         struct sk_buff *skb;
1370         struct nlmsghdr *nlh;
1371         const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1372         int err;
1373
1374         if (!ops)
1375                 return -EOPNOTSUPP;
1376
1377         skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1378         if (!skb)
1379                 return -ENOBUFS;
1380
1381         if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1382                 err = dcbnl_ieee_fill(skb, dev);
1383         else
1384                 err = dcbnl_cee_fill(skb, dev);
1385
1386         if (err < 0) {
1387                 /* Report error to broadcast listeners */
1388                 nlmsg_free(skb);
1389                 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1390         } else {
1391                 /* End nlmsg and notify broadcast listeners */
1392                 nlmsg_end(skb, nlh);
1393                 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1394         }
1395
1396         return err;
1397 }
1398
1399 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1400                       u32 seq, u32 portid)
1401 {
1402         return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
1403 }
1404 EXPORT_SYMBOL(dcbnl_ieee_notify);
1405
1406 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1407                      u32 seq, u32 portid)
1408 {
1409         return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
1410 }
1411 EXPORT_SYMBOL(dcbnl_cee_notify);
1412
1413 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
1414  * If any requested operation cannot be completed,
1415  * the entire message is aborted and an error value is returned.
1416  * No attempt is made to reconcile the case where only part of the
1417  * command can be completed.
1418  */
1419 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1420                           u32 seq, struct nlattr **tb, struct sk_buff *skb)
1421 {
1422         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1423         struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1424         int prio;
1425         int err;
1426
1427         if (!ops)
1428                 return -EOPNOTSUPP;
1429
1430         if (!tb[DCB_ATTR_IEEE])
1431                 return -EINVAL;
1432
1433         err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
1434                                dcbnl_ieee_policy, NULL);
1435         if (err)
1436                 return err;
1437
1438         if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1439                 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1440                 err = ops->ieee_setets(netdev, ets);
1441                 if (err)
1442                         goto err;
1443         }
1444
1445         if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1446                 struct ieee_maxrate *maxrate =
1447                         nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1448                 err = ops->ieee_setmaxrate(netdev, maxrate);
1449                 if (err)
1450                         goto err;
1451         }
1452
1453         if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
1454                 struct ieee_qcn *qcn =
1455                         nla_data(ieee[DCB_ATTR_IEEE_QCN]);
1456
1457                 err = ops->ieee_setqcn(netdev, qcn);
1458                 if (err)
1459                         goto err;
1460         }
1461
1462         if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1463                 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1464                 err = ops->ieee_setpfc(netdev, pfc);
1465                 if (err)
1466                         goto err;
1467         }
1468
1469         if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
1470                 struct dcbnl_buffer *buffer =
1471                         nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
1472
1473                 for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
1474                         if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
1475                                 err = -EINVAL;
1476                                 goto err;
1477                         }
1478                 }
1479
1480                 err = ops->dcbnl_setbuffer(netdev, buffer);
1481                 if (err)
1482                         goto err;
1483         }
1484
1485         if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1486                 struct nlattr *attr;
1487                 int rem;
1488
1489                 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1490                         struct dcb_app *app_data;
1491
1492                         if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1493                                 continue;
1494
1495                         if (nla_len(attr) < sizeof(struct dcb_app)) {
1496                                 err = -ERANGE;
1497                                 goto err;
1498                         }
1499
1500                         app_data = nla_data(attr);
1501                         if (ops->ieee_setapp)
1502                                 err = ops->ieee_setapp(netdev, app_data);
1503                         else
1504                                 err = dcb_ieee_setapp(netdev, app_data);
1505                         if (err)
1506                                 goto err;
1507                 }
1508         }
1509
1510 err:
1511         err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1512         dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1513         return err;
1514 }
1515
1516 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1517                           u32 seq, struct nlattr **tb, struct sk_buff *skb)
1518 {
1519         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1520
1521         if (!ops)
1522                 return -EOPNOTSUPP;
1523
1524         return dcbnl_ieee_fill(skb, netdev);
1525 }
1526
1527 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1528                           u32 seq, struct nlattr **tb, struct sk_buff *skb)
1529 {
1530         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1531         struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1532         int err;
1533
1534         if (!ops)
1535                 return -EOPNOTSUPP;
1536
1537         if (!tb[DCB_ATTR_IEEE])
1538                 return -EINVAL;
1539
1540         err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
1541                                dcbnl_ieee_policy, NULL);
1542         if (err)
1543                 return err;
1544
1545         if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1546                 struct nlattr *attr;
1547                 int rem;
1548
1549                 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1550                         struct dcb_app *app_data;
1551
1552                         if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1553                                 continue;
1554                         app_data = nla_data(attr);
1555                         if (ops->ieee_delapp)
1556                                 err = ops->ieee_delapp(netdev, app_data);
1557                         else
1558                                 err = dcb_ieee_delapp(netdev, app_data);
1559                         if (err)
1560                                 goto err;
1561                 }
1562         }
1563
1564 err:
1565         err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1566         dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1567         return err;
1568 }
1569
1570
1571 /* DCBX configuration */
1572 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1573                          u32 seq, struct nlattr **tb, struct sk_buff *skb)
1574 {
1575         if (!netdev->dcbnl_ops->getdcbx)
1576                 return -EOPNOTSUPP;
1577
1578         return nla_put_u8(skb, DCB_ATTR_DCBX,
1579                           netdev->dcbnl_ops->getdcbx(netdev));
1580 }
1581
1582 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1583                          u32 seq, struct nlattr **tb, struct sk_buff *skb)
1584 {
1585         u8 value;
1586
1587         if (!netdev->dcbnl_ops->setdcbx)
1588                 return -EOPNOTSUPP;
1589
1590         if (!tb[DCB_ATTR_DCBX])
1591                 return -EINVAL;
1592
1593         value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1594
1595         return nla_put_u8(skb, DCB_ATTR_DCBX,
1596                           netdev->dcbnl_ops->setdcbx(netdev, value));
1597 }
1598
1599 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1600                             u32 seq, struct nlattr **tb, struct sk_buff *skb)
1601 {
1602         struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1603         u8 value;
1604         int ret, i;
1605         int getall = 0;
1606
1607         if (!netdev->dcbnl_ops->getfeatcfg)
1608                 return -EOPNOTSUPP;
1609
1610         if (!tb[DCB_ATTR_FEATCFG])
1611                 return -EINVAL;
1612
1613         ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
1614                                tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
1615         if (ret)
1616                 return ret;
1617
1618         nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
1619         if (!nest)
1620                 return -EMSGSIZE;
1621
1622         if (data[DCB_FEATCFG_ATTR_ALL])
1623                 getall = 1;
1624
1625         for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1626                 if (!getall && !data[i])
1627                         continue;
1628
1629                 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1630                 if (!ret)
1631                         ret = nla_put_u8(skb, i, value);
1632
1633                 if (ret) {
1634                         nla_nest_cancel(skb, nest);
1635                         goto nla_put_failure;
1636                 }
1637         }
1638         nla_nest_end(skb, nest);
1639
1640 nla_put_failure:
1641         return ret;
1642 }
1643
1644 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1645                             u32 seq, struct nlattr **tb, struct sk_buff *skb)
1646 {
1647         struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1648         int ret, i;
1649         u8 value;
1650
1651         if (!netdev->dcbnl_ops->setfeatcfg)
1652                 return -EOPNOTSUPP;
1653
1654         if (!tb[DCB_ATTR_FEATCFG])
1655                 return -EINVAL;
1656
1657         ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
1658                                tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
1659
1660         if (ret)
1661                 goto err;
1662
1663         for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1664                 if (data[i] == NULL)
1665                         continue;
1666
1667                 value = nla_get_u8(data[i]);
1668
1669                 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1670
1671                 if (ret)
1672                         goto err;
1673         }
1674 err:
1675         ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1676
1677         return ret;
1678 }
1679
1680 /* Handle CEE DCBX GET commands. */
1681 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1682                          u32 seq, struct nlattr **tb, struct sk_buff *skb)
1683 {
1684         const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1685
1686         if (!ops)
1687                 return -EOPNOTSUPP;
1688
1689         return dcbnl_cee_fill(skb, netdev);
1690 }
1691
1692 struct reply_func {
1693         /* reply netlink message type */
1694         int     type;
1695
1696         /* function to fill message contents */
1697         int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
1698                     struct nlattr **, struct sk_buff *);
1699 };
1700
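/* Dispatch table indexed by DCB_CMD_*.  Each entry gives the netlink
 * message type expected for the reply and the callback that fills it;
 * commands without an entry are rejected with -EOPNOTSUPP in dcb_doit().
 */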
1701 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1702         [DCB_CMD_GSTATE]        = { RTM_GETDCB, dcbnl_getstate },
1703         [DCB_CMD_SSTATE]        = { RTM_SETDCB, dcbnl_setstate },
1704         [DCB_CMD_PFC_GCFG]      = { RTM_GETDCB, dcbnl_getpfccfg },
1705         [DCB_CMD_PFC_SCFG]      = { RTM_SETDCB, dcbnl_setpfccfg },
1706         [DCB_CMD_GPERM_HWADDR]  = { RTM_GETDCB, dcbnl_getperm_hwaddr },
1707         [DCB_CMD_GCAP]          = { RTM_GETDCB, dcbnl_getcap },
1708         [DCB_CMD_GNUMTCS]       = { RTM_GETDCB, dcbnl_getnumtcs },
1709         [DCB_CMD_SNUMTCS]       = { RTM_SETDCB, dcbnl_setnumtcs },
1710         [DCB_CMD_PFC_GSTATE]    = { RTM_GETDCB, dcbnl_getpfcstate },
1711         [DCB_CMD_PFC_SSTATE]    = { RTM_SETDCB, dcbnl_setpfcstate },
1712         [DCB_CMD_GAPP]          = { RTM_GETDCB, dcbnl_getapp },
1713         [DCB_CMD_SAPP]          = { RTM_SETDCB, dcbnl_setapp },
1714         [DCB_CMD_PGTX_GCFG]     = { RTM_GETDCB, dcbnl_pgtx_getcfg },
1715         [DCB_CMD_PGTX_SCFG]     = { RTM_SETDCB, dcbnl_pgtx_setcfg },
1716         [DCB_CMD_PGRX_GCFG]     = { RTM_GETDCB, dcbnl_pgrx_getcfg },
1717         [DCB_CMD_PGRX_SCFG]     = { RTM_SETDCB, dcbnl_pgrx_setcfg },
1718         [DCB_CMD_SET_ALL]       = { RTM_SETDCB, dcbnl_setall },
1719         [DCB_CMD_BCN_GCFG]      = { RTM_GETDCB, dcbnl_bcn_getcfg },
1720         [DCB_CMD_BCN_SCFG]      = { RTM_SETDCB, dcbnl_bcn_setcfg },
1721         [DCB_CMD_IEEE_GET]      = { RTM_GETDCB, dcbnl_ieee_get },
1722         [DCB_CMD_IEEE_SET]      = { RTM_SETDCB, dcbnl_ieee_set },
1723         [DCB_CMD_IEEE_DEL]      = { RTM_SETDCB, dcbnl_ieee_del },
1724         [DCB_CMD_GDCBX]         = { RTM_GETDCB, dcbnl_getdcbx },
1725         [DCB_CMD_SDCBX]         = { RTM_SETDCB, dcbnl_setdcbx },
1726         [DCB_CMD_GFEATCFG]      = { RTM_GETDCB, dcbnl_getfeatcfg },
1727         [DCB_CMD_SFEATCFG]      = { RTM_SETDCB, dcbnl_setfeatcfg },
1728         [DCB_CMD_CEE_GET]       = { RTM_GETDCB, dcbnl_cee_get },
1729 };
1730
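/* rtnetlink doit handler for RTM_GETDCB/RTM_SETDCB: validate the request,
 * look up the named device, run the per-command callback from reply_funcs[]
 * and unicast the reply back to the requesting socket.
 */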
1731 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1732                     struct netlink_ext_ack *extack)
1733 {
1734         struct net *net = sock_net(skb->sk);
1735         struct net_device *netdev;
1736         struct dcbmsg *dcb = nlmsg_data(nlh);
1737         struct nlattr *tb[DCB_ATTR_MAX + 1];
1738         u32 portid = skb ? NETLINK_CB(skb).portid : 0;
1739         int ret = -EINVAL;
1740         struct sk_buff *reply_skb;
1741         struct nlmsghdr *reply_nlh = NULL;
1742         const struct reply_func *fn;
1743
1744         if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
1745                 return -EPERM;
1746
1747         ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1748                           dcbnl_rtnl_policy, extack);
1749         if (ret < 0)
1750                 return ret;
1751
1752         if (dcb->cmd > DCB_CMD_MAX)
1753                 return -EINVAL;
1754
1755         /* check if a reply function has been defined for the command */
1756         fn = &reply_funcs[dcb->cmd];
1757         if (!fn->cb)
1758                 return -EOPNOTSUPP;
1759         if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
1760                 return -EPERM;
1761
1762         if (!tb[DCB_ATTR_IFNAME])
1763                 return -EINVAL;
1764
1765         netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
1766         if (!netdev)
1767                 return -ENODEV;
1768
1769         if (!netdev->dcbnl_ops)
1770                 return -EOPNOTSUPP;
1771
1772         reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
1773                                  nlh->nlmsg_flags, &reply_nlh);
1774         if (!reply_skb)
1775                 return -ENOBUFS;
1776
1777         ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
1778         if (ret < 0) {
1779                 nlmsg_free(reply_skb);
1780                 goto out;
1781         }
1782
1783         nlmsg_end(reply_skb, reply_nlh);
1784
1785         ret = rtnl_unicast(reply_skb, net, portid);
1786 out:
1787         return ret;
1788 }
1789
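/* dcb_app_lookup()/dcb_app_add() walk and modify the global dcb_app_list;
 * callers must hold dcb_lock.  A prio of -1 matches an entry with any
 * priority.
 */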
1790 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1791                                            int ifindex, int prio)
1792 {
1793         struct dcb_app_type *itr;
1794
1795         list_for_each_entry(itr, &dcb_app_list, list) {
1796                 if (itr->app.selector == app->selector &&
1797                     itr->app.protocol == app->protocol &&
1798                     itr->ifindex == ifindex &&
1799                     ((prio == -1) || itr->app.priority == prio))
1800                         return itr;
1801         }
1802
1803         return NULL;
1804 }
1805
1806 static int dcb_app_add(const struct dcb_app *app, int ifindex)
1807 {
1808         struct dcb_app_type *entry;
1809
1810         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1811         if (!entry)
1812                 return -ENOMEM;
1813
1814         memcpy(&entry->app, app, sizeof(*app));
1815         entry->ifindex = ifindex;
1816         list_add(&entry->list, &dcb_app_list);
1817
1818         return 0;
1819 }
1820
1821 /**
1822  * dcb_getapp - retrieve the DCBX application user priority
1823  *
1824  * On success, returns a non-zero 802.1p user priority bitmap;
1825  * otherwise returns 0, the invalid user priority bitmap, to
1826  * indicate an error.
1827  */
1828 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1829 {
1830         struct dcb_app_type *itr;
1831         u8 prio = 0;
1832
1833         spin_lock_bh(&dcb_lock);
1834         itr = dcb_app_lookup(app, dev->ifindex, -1);
1835         if (itr)
1836                 prio = itr->app.priority;
1837         spin_unlock_bh(&dcb_lock);
1838
1839         return prio;
1840 }
1841 EXPORT_SYMBOL(dcb_getapp);
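/* A minimal usage sketch (hypothetical caller, not taken from an in-tree
 * driver): query the CEE user priority bitmap configured for the FCoE
 * ethertype.  A zero return means no APP entry is configured for FCoE.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,
 *	};
 *	u8 up_map = dcb_getapp(netdev, &app);
 */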
1842
1843 /**
1844  * dcb_setapp - add CEE dcb application data to app list
1845  *
1846  * Priority 0 is an invalid priority in the CEE spec. This routine
1847  * removes applications from the app list if the priority is
1848  * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
1849  */
1850 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1851 {
1852         struct dcb_app_type *itr;
1853         struct dcb_app_type event;
1854         int err = 0;
1855
1856         event.ifindex = dev->ifindex;
1857         memcpy(&event.app, new, sizeof(event.app));
1858         if (dev->dcbnl_ops->getdcbx)
1859                 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1860
1861         spin_lock_bh(&dcb_lock);
1862         /* Search for existing match and replace */
1863         itr = dcb_app_lookup(new, dev->ifindex, -1);
1864         if (itr) {
1865                 if (new->priority)
1866                         itr->app.priority = new->priority;
1867                 else {
1868                         list_del(&itr->list);
1869                         kfree(itr);
1870                 }
1871                 goto out;
1872         }
1873         /* App type does not exist; add new application type */
1874         if (new->priority)
1875                 err = dcb_app_add(new, dev->ifindex);
1876 out:
1877         spin_unlock_bh(&dcb_lock);
1878         if (!err)
1879                 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1880         return err;
1881 }
1882 EXPORT_SYMBOL(dcb_setapp);
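/* Sketch of how a CEE DCBX agent might use dcb_setapp() (hypothetical
 * caller, not taken from an in-tree driver).  A non-zero priority bitmap
 * adds or replaces the entry (here 1 << 3 maps FCoE traffic to user
 * priority 3); writing the same entry back with a zero bitmap removes it.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,
 *		.priority = 1 << 3,
 *	};
 *	int err = dcb_setapp(netdev, &app);
 *
 *	app.priority = 0;
 *	err = dcb_setapp(netdev, &app);
 */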
1883
1884 /**
1885  * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
1886  *
1887  * Helper routine which, on success, returns a non-zero 802.1Qaz user
1888  * priority bitmap; otherwise returns 0 to indicate the dcb_app was
1889  * not found in the APP list.
1890  */
1891 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1892 {
1893         struct dcb_app_type *itr;
1894         u8 prio = 0;
1895
1896         spin_lock_bh(&dcb_lock);
1897         itr = dcb_app_lookup(app, dev->ifindex, -1);
1898         if (itr)
1899                 prio |= 1 << itr->app.priority;
1900         spin_unlock_bh(&dcb_lock);
1901
1902         return prio;
1903 }
1904 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
1905
1906 /**
1907  * dcb_ieee_setapp - add IEEE dcb application data to app list
1908  *
1909  * This adds application data to the list. Multiple application
1910  * entries may exist for the same selector and protocol as long
1911  * as the priorities are different. Priority is expected to be a
1912  * 3-bit unsigned integer.
1913  */
1914 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1915 {
1916         struct dcb_app_type event;
1917         int err = 0;
1918
1919         event.ifindex = dev->ifindex;
1920         memcpy(&event.app, new, sizeof(event.app));
1921         if (dev->dcbnl_ops->getdcbx)
1922                 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1923
1924         spin_lock_bh(&dcb_lock);
1925         /* Search for existing match and abort if found */
1926         if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1927                 err = -EEXIST;
1928                 goto out;
1929         }
1930
1931         err = dcb_app_add(new, dev->ifindex);
1932 out:
1933         spin_unlock_bh(&dcb_lock);
1934         if (!err)
1935                 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1936         return err;
1937 }
1938 EXPORT_SYMBOL(dcb_ieee_setapp);
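/* Sketch of an IEEE 802.1Qaz APP configuration (hypothetical caller):
 * map DSCP 46 (EF) to priority 5.  Unlike the CEE variant above, the
 * priority is a plain 3-bit value, and a duplicate of the same
 * selector/protocol/priority triplet is rejected with -EEXIST.
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
 *		.protocol = 46,
 *		.priority = 5,
 *	};
 *	int err = dcb_ieee_setapp(netdev, &app);
 */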
1939
1940 /**
1941  * dcb_ieee_delapp - delete IEEE dcb application data from list
1942  *
1943  * This removes a matching APP entry from the APP list.
1944  */
1945 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1946 {
1947         struct dcb_app_type *itr;
1948         struct dcb_app_type event;
1949         int err = -ENOENT;
1950
1951         event.ifindex = dev->ifindex;
1952         memcpy(&event.app, del, sizeof(event.app));
1953         if (dev->dcbnl_ops->getdcbx)
1954                 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1955
1956         spin_lock_bh(&dcb_lock);
1957         /* Search for existing match and remove it. */
1958         if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1959                 list_del(&itr->list);
1960                 kfree(itr);
1961                 err = 0;
1962         }
1963
1964         spin_unlock_bh(&dcb_lock);
1965         if (!err)
1966                 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1967         return err;
1968 }
1969 EXPORT_SYMBOL(dcb_ieee_delapp);
1970
1971 /**
1972  * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
1973  * priorities to the DSCP values assigned to that priority. Initialize p_map
1974  * such that each map element holds a bit mask of DSCP values configured for
1975  * that priority by APP entries.
1976  */
1977 void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
1978                                         struct dcb_ieee_app_prio_map *p_map)
1979 {
1980         int ifindex = dev->ifindex;
1981         struct dcb_app_type *itr;
1982         u8 prio;
1983
1984         memset(p_map->map, 0, sizeof(p_map->map));
1985
1986         spin_lock_bh(&dcb_lock);
1987         list_for_each_entry(itr, &dcb_app_list, list) {
1988                 if (itr->ifindex == ifindex &&
1989                     itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
1990                     itr->app.protocol < 64 &&
1991                     itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
1992                         prio = itr->app.priority;
1993                         p_map->map[prio] |= 1ULL << itr->app.protocol;
1994                 }
1995         }
1996         spin_unlock_bh(&dcb_lock);
1997 }
1998 EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
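/* A sketch of how a driver might consume the map (hypothetical caller):
 * walk each priority and program the DSCP values found in its bit mask.
 * program_dscp() stands in for the driver-specific hook and is not a
 * real kernel function.
 *
 *	struct dcb_ieee_app_prio_map map;
 *	int prio, dscp;
 *
 *	dcb_ieee_getapp_prio_dscp_mask_map(netdev, &map);
 *	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
 *		for (dscp = 0; dscp < 64; dscp++)
 *			if (map.map[prio] & (1ULL << dscp))
 *				program_dscp(netdev, prio, dscp);
 */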
1999
2000 /**
2001  * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
2002  * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
2003  * such that each map element holds a bit mask of priorities configured for a
2004  * given DSCP value by APP entries.
2005  */
2006 void
2007 dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
2008                                    struct dcb_ieee_app_dscp_map *p_map)
2009 {
2010         int ifindex = dev->ifindex;
2011         struct dcb_app_type *itr;
2012
2013         memset(p_map->map, 0, sizeof(p_map->map));
2014
2015         spin_lock_bh(&dcb_lock);
2016         list_for_each_entry(itr, &dcb_app_list, list) {
2017                 if (itr->ifindex == ifindex &&
2018                     itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2019                     itr->app.protocol < 64 &&
2020                     itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2021                         p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
2022         }
2023         spin_unlock_bh(&dcb_lock);
2024 }
2025 EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
2026
2027 /**
2028  * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
2029  * type, with valid PID values >= 1536. A special meaning is then assigned to
2030  * protocol value of 0: "default priority. For use when priority is not
2031  * otherwise specified".
2032  *
2033  * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
2034  * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
2035  * priorities set by these entries.
2036  */
2037 u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
2038 {
2039         int ifindex = dev->ifindex;
2040         struct dcb_app_type *itr;
2041         u8 mask = 0;
2042
2043         spin_lock_bh(&dcb_lock);
2044         list_for_each_entry(itr, &dcb_app_list, list) {
2045                 if (itr->ifindex == ifindex &&
2046                     itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
2047                     itr->app.protocol == 0 &&
2048                     itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2049                         mask |= 1 << itr->app.priority;
2050         }
2051         spin_unlock_bh(&dcb_lock);
2052
2053         return mask;
2054 }
2055 EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
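/* Example (hypothetical caller): pick a default priority from the mask
 * returned above, e.g. the highest one that is set.  fls() returns the
 * one-based index of the most significant set bit, so "fls(mask) - 1"
 * is the highest default priority configured.
 *
 *	u8 mask = dcb_ieee_getapp_default_prio_mask(netdev);
 *	int prio = mask ? fls(mask) - 1 : 0;
 */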
2056
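/* Drop every APP table entry that belongs to the device; called from the
 * NETDEV_UNREGISTER notifier below so stale entries do not outlive it.
 */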
2057 static void dcbnl_flush_dev(struct net_device *dev)
2058 {
2059         struct dcb_app_type *itr, *tmp;
2060
2061         spin_lock_bh(&dcb_lock);
2062
2063         list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
2064                 if (itr->ifindex == dev->ifindex) {
2065                         list_del(&itr->list);
2066                         kfree(itr);
2067                 }
2068         }
2069
2070         spin_unlock_bh(&dcb_lock);
2071 }
2072
2073 static int dcbnl_netdevice_event(struct notifier_block *nb,
2074                                  unsigned long event, void *ptr)
2075 {
2076         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2077
2078         switch (event) {
2079         case NETDEV_UNREGISTER:
2080                 if (!dev->dcbnl_ops)
2081                         return NOTIFY_DONE;
2082
2083                 dcbnl_flush_dev(dev);
2084
2085                 return NOTIFY_OK;
2086         default:
2087                 return NOTIFY_DONE;
2088         }
2089 }
2090
2091 static struct notifier_block dcbnl_nb __read_mostly = {
2092         .notifier_call  = dcbnl_netdevice_event,
2093 };
2094
2095 static int __init dcbnl_init(void)
2096 {
2097         int err;
2098
2099         INIT_LIST_HEAD(&dcb_app_list);
2100
2101         err = register_netdevice_notifier(&dcbnl_nb);
2102         if (err)
2103                 return err;
2104
2105         rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
2106         rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
2107
2108         return 0;
2109 }
2110 device_initcall(dcbnl_init);