1 /*
2  * Copyright (c) 2008-2011, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Author: Lucy Liu <lucy.liu@intel.com>
18  */
19 
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/module.h>
29 #include <net/sock.h>
30 
31 /**
32  * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33  * intended to allow network traffic with differing requirements
34  * (highly reliable, no drops vs. best effort vs. low latency) to operate
35  * and co-exist on Ethernet.  Current DCB features are:
36  *
37  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
38  *   framework for assigning bandwidth guarantees to traffic classes.
39  *
40  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
41  *   can work independently for each 802.1p priority.
42  *
43  * Congestion Notification - provides a mechanism for end-to-end congestion
44  *   control for protocols which do not have built-in congestion management.
45  *
46  * More information about the emerging standards for these Ethernet features
47  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
48  *
49  * This file implements an rtnetlink interface to allow configuration of DCB
50  * features for capable devices.
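 *
 * A driver advertises DCB support by pointing its net_device at a
 * struct dcbnl_rtnl_ops table.  An illustrative sketch (the foo_* names are
 * hypothetical, not part of this file):
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate  = foo_dcb_getstate,
 *		.setstate  = foo_dcb_setstate,
 *		.getpfccfg = foo_dcb_getpfccfg,
 *		.setpfccfg = foo_dcb_setpfccfg,
 *	};
 *
 *	netdev->dcbnl_ops = &foo_dcbnl_ops;
 *
 * Only devices that set dcbnl_ops are serviced by the handlers below.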
51  */
52 
53 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
54 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
55 MODULE_LICENSE("GPL");
56 
57 /**************** DCB attribute policies *************************************/
58 
59 /* DCB netlink attributes policy */
60 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
61 	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
62 	[DCB_ATTR_STATE]       = {.type = NLA_U8},
63 	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
64 	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
65 	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
66 	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
67 	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
68 	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
69 	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
70 	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
71 	[DCB_ATTR_IEEE]	       = {.type = NLA_NESTED},
72 	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
73 	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
74 };
75 
76 /* DCB priority flow control to User Priority nested attributes */
77 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
78 	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
79 	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
80 	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
81 	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
82 	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
83 	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
84 	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
85 	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
86 	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
87 };
88 
89 /* DCB priority grouping nested attributes */
90 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
91 	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
92 	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
93 	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
94 	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
95 	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
96 	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
97 	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
98 	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
99 	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
100 	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
101 	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
102 	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
103 	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
104 	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
105 	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
106 	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
107 	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
108 	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
109 };
110 
111 /* DCB traffic class nested attributes. */
112 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
113 	[DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
114 	[DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
115 	[DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
116 	[DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
117 	[DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
118 };
119 
120 /* DCB capabilities nested attributes. */
121 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
122 	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
123 	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
124 	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
125 	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
126 	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
127 	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
128 	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
129 	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
130 	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
131 };
132 
133 /* DCB number of traffic classes nested attributes. */
134 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
135 	[DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
136 	[DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
137 	[DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
138 };
139 
140 /* DCB BCN nested attributes. */
141 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
142 	[DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
143 	[DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
144 	[DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
145 	[DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
146 	[DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
147 	[DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
148 	[DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
149 	[DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
150 	[DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
151 	[DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
152 	[DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
153 	[DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
154 	[DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
155 	[DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
156 	[DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
157 	[DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
158 	[DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
159 	[DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
160 	[DCB_BCN_ATTR_W]            = {.type = NLA_U32},
161 	[DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
162 	[DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
163 	[DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
164 	[DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
165 	[DCB_BCN_ATTR_C]            = {.type = NLA_U32},
166 	[DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
167 };
168 
169 /* DCB APP nested attributes. */
170 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
171 	[DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
172 	[DCB_APP_ATTR_ID]           = {.type = NLA_U16},
173 	[DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
174 };
175 
176 /* IEEE 802.1Qaz nested attributes. */
177 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
178 	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
179 	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
180 	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
181 };
182 
183 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
184 	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
185 };
186 
187 /* DCB feature configuration nested attributes. */
188 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
189 	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
190 	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
191 	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
192 	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
193 };
194 
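/* Per-device application priority entries (struct dcb_app_type) live on this
 * global list, keyed by ifindex and protected by dcb_lock.
 */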
195 static LIST_HEAD(dcb_app_list);
196 static DEFINE_SPINLOCK(dcb_lock);
197 
198 /* standard netlink reply call */
199 static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
200                        u32 seq, u16 flags)
201 {
202 	struct sk_buff *dcbnl_skb;
203 	struct dcbmsg *dcb;
204 	struct nlmsghdr *nlh;
205 	int ret = -EINVAL;
206 
207 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
208 	if (!dcbnl_skb)
209 		return ret;
210 
211 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
212 
213 	dcb = NLMSG_DATA(nlh);
214 	dcb->dcb_family = AF_UNSPEC;
215 	dcb->cmd = cmd;
216 	dcb->dcb_pad = 0;
217 
218 	ret = nla_put_u8(dcbnl_skb, attr, value);
219 	if (ret)
220 		goto err;
221 
222 	/* end the message, assign the nlmsg_len. */
223 	nlmsg_end(dcbnl_skb, nlh);
224 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
225 	if (ret)
226 		return -EINVAL;
227 
228 	return 0;
229 nlmsg_failure:
230 err:
231 	kfree_skb(dcbnl_skb);
232 	return ret;
233 }
234 
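/* GET the DCB enable state of the device and reply with DCB_ATTR_STATE. */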
235 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
236                           u32 pid, u32 seq, u16 flags)
237 {
238 	int ret = -EINVAL;
239 
240 	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
241 	if (!netdev->dcbnl_ops->getstate)
242 		return ret;
243 
244 	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
245 	                  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
246 
247 	return ret;
248 }
249 
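/* GET the per-priority PFC configuration: query the driver for each user
 * priority requested (or all of them if DCB_PFC_UP_ATTR_ALL is set) and
 * reply with a DCB_ATTR_PFC_CFG nest of u8 values.
 */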
250 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
251                            u32 pid, u32 seq, u16 flags)
252 {
253 	struct sk_buff *dcbnl_skb;
254 	struct nlmsghdr *nlh;
255 	struct dcbmsg *dcb;
256 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
257 	u8 value;
258 	int ret = -EINVAL;
259 	int i;
260 	int getall = 0;
261 
262 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
263 		return ret;
264 
265 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
266 	                       tb[DCB_ATTR_PFC_CFG],
267 	                       dcbnl_pfc_up_nest);
268 	if (ret)
269 		goto err_out;
270 
271 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
272 	if (!dcbnl_skb)
273 		goto err_out;
274 
275 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
276 
277 	dcb = NLMSG_DATA(nlh);
278 	dcb->dcb_family = AF_UNSPEC;
279 	dcb->cmd = DCB_CMD_PFC_GCFG;
280 
281 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
282 	if (!nest)
283 		goto err;
284 
285 	if (data[DCB_PFC_UP_ATTR_ALL])
286 		getall = 1;
287 
288 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
289 		if (!getall && !data[i])
290 			continue;
291 
292 		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
293 		                             &value);
294 		ret = nla_put_u8(dcbnl_skb, i, value);
295 
296 		if (ret) {
297 			nla_nest_cancel(dcbnl_skb, nest);
298 			goto err;
299 		}
300 	}
301 	nla_nest_end(dcbnl_skb, nest);
302 
303 	nlmsg_end(dcbnl_skb, nlh);
304 
305 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
306 	if (ret)
307 		goto err_out;
308 
309 	return 0;
310 nlmsg_failure:
311 err:
312 	kfree_skb(dcbnl_skb);
313 err_out:
314 	return -EINVAL;
315 }
316 
317 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
318                                 u32 pid, u32 seq, u16 flags)
319 {
320 	struct sk_buff *dcbnl_skb;
321 	struct nlmsghdr *nlh;
322 	struct dcbmsg *dcb;
323 	u8 perm_addr[MAX_ADDR_LEN];
324 	int ret = -EINVAL;
325 
326 	if (!netdev->dcbnl_ops->getpermhwaddr)
327 		return ret;
328 
329 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
330 	if (!dcbnl_skb)
331 		goto err_out;
332 
333 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
334 
335 	dcb = NLMSG_DATA(nlh);
336 	dcb->dcb_family = AF_UNSPEC;
337 	dcb->cmd = DCB_CMD_GPERM_HWADDR;
338 
339 	memset(perm_addr, 0, sizeof(perm_addr));
340 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
341 
342 	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
343 	              perm_addr);
344 
345 	nlmsg_end(dcbnl_skb, nlh);
346 
347 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
348 	if (ret)
349 		goto err_out;
350 
351 	return 0;
352 
353 nlmsg_failure:
354 	kfree_skb(dcbnl_skb);
355 err_out:
356 	return -EINVAL;
357 }
358 
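/* GET the device DCB capabilities and reply with a DCB_ATTR_CAP nest. */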
359 static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
360                         u32 pid, u32 seq, u16 flags)
361 {
362 	struct sk_buff *dcbnl_skb;
363 	struct nlmsghdr *nlh;
364 	struct dcbmsg *dcb;
365 	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
366 	u8 value;
367 	int ret = -EINVAL;
368 	int i;
369 	int getall = 0;
370 
371 	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
372 		return ret;
373 
374 	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
375 	                       dcbnl_cap_nest);
376 	if (ret)
377 		goto err_out;
378 
379 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
380 	if (!dcbnl_skb)
381 		goto err_out;
382 
383 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
384 
385 	dcb = NLMSG_DATA(nlh);
386 	dcb->dcb_family = AF_UNSPEC;
387 	dcb->cmd = DCB_CMD_GCAP;
388 
389 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
390 	if (!nest)
391 		goto err;
392 
393 	if (data[DCB_CAP_ATTR_ALL])
394 		getall = 1;
395 
396 	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
397 		if (!getall && !data[i])
398 			continue;
399 
400 		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
401 			ret = nla_put_u8(dcbnl_skb, i, value);
402 
403 			if (ret) {
404 				nla_nest_cancel(dcbnl_skb, nest);
405 				goto err;
406 			}
407 		}
408 	}
409 	nla_nest_end(dcbnl_skb, nest);
410 
411 	nlmsg_end(dcbnl_skb, nlh);
412 
413 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
414 	if (ret)
415 		goto err_out;
416 
417 	return 0;
418 nlmsg_failure:
419 err:
420 	kfree_skb(dcbnl_skb);
421 err_out:
422 	return -EINVAL;
423 }
424 
425 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
426                            u32 pid, u32 seq, u16 flags)
427 {
428 	struct sk_buff *dcbnl_skb;
429 	struct nlmsghdr *nlh;
430 	struct dcbmsg *dcb;
431 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
432 	u8 value;
433 	int ret = -EINVAL;
434 	int i;
435 	int getall = 0;
436 
437 	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
438 		return ret;
439 
440 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
441 	                       dcbnl_numtcs_nest);
442 	if (ret) {
443 		ret = -EINVAL;
444 		goto err_out;
445 	}
446 
447 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
448 	if (!dcbnl_skb) {
449 		ret = -EINVAL;
450 		goto err_out;
451 	}
452 
453 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
454 
455 	dcb = NLMSG_DATA(nlh);
456 	dcb->dcb_family = AF_UNSPEC;
457 	dcb->cmd = DCB_CMD_GNUMTCS;
458 
459 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
460 	if (!nest) {
461 		ret = -EINVAL;
462 		goto err;
463 	}
464 
465 	if (data[DCB_NUMTCS_ATTR_ALL])
466 		getall = 1;
467 
468 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
469 		if (!getall && !data[i])
470 			continue;
471 
472 		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
473 		if (!ret) {
474 			ret = nla_put_u8(dcbnl_skb, i, value);
475 
476 			if (ret) {
477 				nla_nest_cancel(dcbnl_skb, nest);
478 				ret = -EINVAL;
479 				goto err;
480 			}
481 		} else {
482 			goto err;
483 		}
484 	}
485 	nla_nest_end(dcbnl_skb, nest);
486 
487 	nlmsg_end(dcbnl_skb, nlh);
488 
489 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
490 	if (ret) {
491 		ret = -EINVAL;
492 		goto err_out;
493 	}
494 
495 	return 0;
496 nlmsg_failure:
497 err:
498 	kfree_skb(dcbnl_skb);
499 err_out:
500 	return ret;
501 }
502 
503 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
504                            u32 pid, u32 seq, u16 flags)
505 {
506 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
507 	int ret = -EINVAL;
508 	u8 value;
509 	int i;
510 
511 	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
512 		return ret;
513 
514 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
515 	                       dcbnl_numtcs_nest);
516 
517 	if (ret) {
518 		ret = -EINVAL;
519 		goto err;
520 	}
521 
522 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
523 		if (data[i] == NULL)
524 			continue;
525 
526 		value = nla_get_u8(data[i]);
527 
528 		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
529 
530 		if (ret)
531 			goto operr;
532 	}
533 
534 operr:
535 	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
536 	                  DCB_ATTR_NUMTCS, pid, seq, flags);
537 
538 err:
539 	return ret;
540 }
541 
542 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
543                              u32 pid, u32 seq, u16 flags)
544 {
545 	int ret = -EINVAL;
546 
547 	if (!netdev->dcbnl_ops->getpfcstate)
548 		return ret;
549 
550 	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
551 	                  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
552 	                  pid, seq, flags);
553 
554 	return ret;
555 }
556 
557 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
558                              u32 pid, u32 seq, u16 flags)
559 {
560 	int ret = -EINVAL;
561 	u8 value;
562 
563 	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
564 		return ret;
565 
566 	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
567 
568 	netdev->dcbnl_ops->setpfcstate(netdev, value);
569 
570 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
571 	                  pid, seq, flags);
572 
573 	return ret;
574 }
575 
576 static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
577                         u32 pid, u32 seq, u16 flags)
578 {
579 	struct sk_buff *dcbnl_skb;
580 	struct nlmsghdr *nlh;
581 	struct dcbmsg *dcb;
582 	struct nlattr *app_nest;
583 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
584 	u16 id;
585 	u8 up, idtype;
586 	int ret = -EINVAL;
587 
588 	if (!tb[DCB_ATTR_APP])
589 		goto out;
590 
591 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
592 	                       dcbnl_app_nest);
593 	if (ret)
594 		goto out;
595 
596 	ret = -EINVAL;
597 	/* all must be non-null */
598 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
599 	    (!app_tb[DCB_APP_ATTR_ID]))
600 		goto out;
601 
602 	/* either by eth type or by socket number */
603 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
604 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
605 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
606 		goto out;
607 
608 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
609 
610 	if (netdev->dcbnl_ops->getapp) {
611 		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
612 	} else {
613 		struct dcb_app app = {
614 					.selector = idtype,
615 					.protocol = id,
616 				     };
617 		up = dcb_getapp(netdev, &app);
618 	}
619 
620 	/* send this back */
621 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
622 	if (!dcbnl_skb)
623 		goto out;
624 
625 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
626 	dcb = NLMSG_DATA(nlh);
627 	dcb->dcb_family = AF_UNSPEC;
628 	dcb->cmd = DCB_CMD_GAPP;
629 
630 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
631 	if (!app_nest)
632 		goto out_cancel;
633 
634 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
635 	if (ret)
636 		goto out_cancel;
637 
638 	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
639 	if (ret)
640 		goto out_cancel;
641 
642 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
643 	if (ret)
644 		goto out_cancel;
645 
646 	nla_nest_end(dcbnl_skb, app_nest);
647 	nlmsg_end(dcbnl_skb, nlh);
648 
649 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
650 	if (ret)
651 		goto nlmsg_failure;
652 
653 	goto out;
654 
655 out_cancel:
656 	nla_nest_cancel(dcbnl_skb, app_nest);
657 nlmsg_failure:
658 	kfree_skb(dcbnl_skb);
659 out:
660 	return ret;
661 }
662 
663 static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
664                         u32 pid, u32 seq, u16 flags)
665 {
666 	int err, ret = -EINVAL;
667 	u16 id;
668 	u8 up, idtype;
669 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
670 
671 	if (!tb[DCB_ATTR_APP])
672 		goto out;
673 
674 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
675 	                       dcbnl_app_nest);
676 	if (ret)
677 		goto out;
678 
679 	ret = -EINVAL;
680 	/* all must be non-null */
681 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
682 	    (!app_tb[DCB_APP_ATTR_ID]) ||
683 	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
684 		goto out;
685 
686 	/* either by eth type or by socket number */
687 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
688 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
689 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
690 		goto out;
691 
692 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
693 	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
694 
695 	if (netdev->dcbnl_ops->setapp) {
696 		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
697 	} else {
698 		struct dcb_app app;
699 		app.selector = idtype;
700 		app.protocol = id;
701 		app.priority = up;
702 		err = dcb_setapp(netdev, &app);
703 	}
704 
705 	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
706 			  pid, seq, flags);
707 out:
708 	return ret;
709 }
710 
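/* Common GET handler for priority group configuration.  dir selects the
 * direction (0 = Tx, 1 = Rx); the reply is a DCB_ATTR_PG_CFG nest of per-TC
 * parameter nests plus per-bandwidth-group percentages.
 */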
711 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
712                              u32 pid, u32 seq, u16 flags, int dir)
713 {
714 	struct sk_buff *dcbnl_skb;
715 	struct nlmsghdr *nlh;
716 	struct dcbmsg *dcb;
717 	struct nlattr *pg_nest, *param_nest, *data;
718 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
719 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
720 	u8 prio, pgid, tc_pct, up_map;
721 	int ret  = -EINVAL;
722 	int getall = 0;
723 	int i;
724 
725 	if (!tb[DCB_ATTR_PG_CFG] ||
726 	    !netdev->dcbnl_ops->getpgtccfgtx ||
727 	    !netdev->dcbnl_ops->getpgtccfgrx ||
728 	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
729 	    !netdev->dcbnl_ops->getpgbwgcfgrx)
730 		return ret;
731 
732 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
733 	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
734 
735 	if (ret)
736 		goto err_out;
737 
738 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
739 	if (!dcbnl_skb)
740 		goto err_out;
741 
742 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
743 
744 	dcb = NLMSG_DATA(nlh);
745 	dcb->dcb_family = AF_UNSPEC;
746 	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
747 
748 	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
749 	if (!pg_nest)
750 		goto err;
751 
752 	if (pg_tb[DCB_PG_ATTR_TC_ALL])
753 		getall = 1;
754 
755 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
756 		if (!getall && !pg_tb[i])
757 			continue;
758 
759 		if (pg_tb[DCB_PG_ATTR_TC_ALL])
760 			data = pg_tb[DCB_PG_ATTR_TC_ALL];
761 		else
762 			data = pg_tb[i];
763 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
764 				       data, dcbnl_tc_param_nest);
765 		if (ret)
766 			goto err_pg;
767 
768 		param_nest = nla_nest_start(dcbnl_skb, i);
769 		if (!param_nest)
770 			goto err_pg;
771 
772 		pgid = DCB_ATTR_VALUE_UNDEFINED;
773 		prio = DCB_ATTR_VALUE_UNDEFINED;
774 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
775 		up_map = DCB_ATTR_VALUE_UNDEFINED;
776 
777 		if (dir) {
778 			/* Rx */
779 			netdev->dcbnl_ops->getpgtccfgrx(netdev,
780 						i - DCB_PG_ATTR_TC_0, &prio,
781 						&pgid, &tc_pct, &up_map);
782 		} else {
783 			/* Tx */
784 			netdev->dcbnl_ops->getpgtccfgtx(netdev,
785 						i - DCB_PG_ATTR_TC_0, &prio,
786 						&pgid, &tc_pct, &up_map);
787 		}
788 
789 		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
790 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
791 			ret = nla_put_u8(dcbnl_skb,
792 			                 DCB_TC_ATTR_PARAM_PGID, pgid);
793 			if (ret)
794 				goto err_param;
795 		}
796 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
797 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
798 			ret = nla_put_u8(dcbnl_skb,
799 			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
800 			if (ret)
801 				goto err_param;
802 		}
803 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
804 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
805 			ret = nla_put_u8(dcbnl_skb,
806 			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
807 			if (ret)
808 				goto err_param;
809 		}
810 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
811 		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
812 			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
813 			                 tc_pct);
814 			if (ret)
815 				goto err_param;
816 		}
817 		nla_nest_end(dcbnl_skb, param_nest);
818 	}
819 
820 	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
821 		getall = 1;
822 	else
823 		getall = 0;
824 
825 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
826 		if (!getall && !pg_tb[i])
827 			continue;
828 
829 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
830 
831 		if (dir) {
832 			/* Rx */
833 			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
834 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
835 		} else {
836 			/* Tx */
837 			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
838 					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
839 		}
840 		ret = nla_put_u8(dcbnl_skb, i, tc_pct);
841 
842 		if (ret)
843 			goto err_pg;
844 	}
845 
846 	nla_nest_end(dcbnl_skb, pg_nest);
847 
848 	nlmsg_end(dcbnl_skb, nlh);
849 
850 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
851 	if (ret)
852 		goto err_out;
853 
854 	return 0;
855 
856 err_param:
857 	nla_nest_cancel(dcbnl_skb, param_nest);
858 err_pg:
859 	nla_nest_cancel(dcbnl_skb, pg_nest);
860 nlmsg_failure:
861 err:
862 	kfree_skb(dcbnl_skb);
863 err_out:
864 	ret  = -EINVAL;
865 	return ret;
866 }
867 
868 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
869                              u32 pid, u32 seq, u16 flags)
870 {
871 	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
872 }
873 
874 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
875                              u32 pid, u32 seq, u16 flags)
876 {
877 	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
878 }
879 
880 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
881                           u32 pid, u32 seq, u16 flags)
882 {
883 	int ret = -EINVAL;
884 	u8 value;
885 
886 	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
887 		return ret;
888 
889 	value = nla_get_u8(tb[DCB_ATTR_STATE]);
890 
891 	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
892 	                  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
893 	                  pid, seq, flags);
894 
895 	return ret;
896 }
897 
898 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
899                            u32 pid, u32 seq, u16 flags)
900 {
901 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
902 	int i;
903 	int ret = -EINVAL;
904 	u8 value;
905 
906 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
907 		return ret;
908 
909 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
910 	                       tb[DCB_ATTR_PFC_CFG],
911 	                       dcbnl_pfc_up_nest);
912 	if (ret)
913 		goto err;
914 
915 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
916 		if (data[i] == NULL)
917 			continue;
918 		value = nla_get_u8(data[i]);
919 		netdev->dcbnl_ops->setpfccfg(netdev,
920 			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
921 	}
922 
923 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
924 	                  pid, seq, flags);
925 err:
926 	return ret;
927 }
928 
929 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
930                         u32 pid, u32 seq, u16 flags)
931 {
932 	int ret = -EINVAL;
933 
934 	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
935 		return ret;
936 
937 	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
938 	                  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
939 
940 	return ret;
941 }
942 
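/* Common SET handler for priority group configuration; dir = 0 for Tx,
 * 1 for Rx.  Parameters not supplied by the caller are passed to the driver
 * as DCB_ATTR_VALUE_UNDEFINED.
 */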
943 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
944                              u32 pid, u32 seq, u16 flags, int dir)
945 {
946 	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
947 	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
948 	int ret = -EINVAL;
949 	int i;
950 	u8 pgid;
951 	u8 up_map;
952 	u8 prio;
953 	u8 tc_pct;
954 
955 	if (!tb[DCB_ATTR_PG_CFG] ||
956 	    !netdev->dcbnl_ops->setpgtccfgtx ||
957 	    !netdev->dcbnl_ops->setpgtccfgrx ||
958 	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
959 	    !netdev->dcbnl_ops->setpgbwgcfgrx)
960 		return ret;
961 
962 	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
963 	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
964 	if (ret)
965 		goto err;
966 
967 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
968 		if (!pg_tb[i])
969 			continue;
970 
971 		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
972 		                       pg_tb[i], dcbnl_tc_param_nest);
973 		if (ret)
974 			goto err;
975 
976 		pgid = DCB_ATTR_VALUE_UNDEFINED;
977 		prio = DCB_ATTR_VALUE_UNDEFINED;
978 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
979 		up_map = DCB_ATTR_VALUE_UNDEFINED;
980 
981 		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
982 			prio =
983 			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
984 
985 		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
986 			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
987 
988 		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
989 			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
990 
991 		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
992 			up_map =
993 			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
994 
995 		/* dir: Tx = 0, Rx = 1 */
996 		if (dir) {
997 			/* Rx */
998 			netdev->dcbnl_ops->setpgtccfgrx(netdev,
999 				i - DCB_PG_ATTR_TC_0,
1000 				prio, pgid, tc_pct, up_map);
1001 		} else {
1002 			/* Tx */
1003 			netdev->dcbnl_ops->setpgtccfgtx(netdev,
1004 				i - DCB_PG_ATTR_TC_0,
1005 				prio, pgid, tc_pct, up_map);
1006 		}
1007 	}
1008 
1009 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1010 		if (!pg_tb[i])
1011 			continue;
1012 
1013 		tc_pct = nla_get_u8(pg_tb[i]);
1014 
1015 		/* dir: Tx = 0, Rx = 1 */
1016 		if (dir) {
1017 			/* Rx */
1018 			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
1019 					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1020 		} else {
1021 			/* Tx */
1022 			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
1023 					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
1024 		}
1025 	}
1026 
1027 	ret = dcbnl_reply(0, RTM_SETDCB,
1028 			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
1029 			  DCB_ATTR_PG_CFG, pid, seq, flags);
1030 
1031 err:
1032 	return ret;
1033 }
1034 
1035 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
1036                              u32 pid, u32 seq, u16 flags)
1037 {
1038 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
1039 }
1040 
1041 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
1042                              u32 pid, u32 seq, u16 flags)
1043 {
1044 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
1045 }
1046 
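/* GET the BCN (congestion notification) configuration and reply with a
 * DCB_ATTR_BCN nest of per-priority RP bytes and u32 BCN parameters.
 */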
1047 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
1048                             u32 pid, u32 seq, u16 flags)
1049 {
1050 	struct sk_buff *dcbnl_skb;
1051 	struct nlmsghdr *nlh;
1052 	struct dcbmsg *dcb;
1053 	struct nlattr *bcn_nest;
1054 	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
1055 	u8 value_byte;
1056 	u32 value_integer;
1057 	int ret  = -EINVAL;
1058 	bool getall = false;
1059 	int i;
1060 
1061 	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
1062 	    !netdev->dcbnl_ops->getbcncfg)
1063 		return ret;
1064 
1065 	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
1066 	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
1067 
1068 	if (ret)
1069 		goto err_out;
1070 
1071 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1072 	if (!dcbnl_skb)
1073 		goto err_out;
1074 
1075 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1076 
1077 	dcb = NLMSG_DATA(nlh);
1078 	dcb->dcb_family = AF_UNSPEC;
1079 	dcb->cmd = DCB_CMD_BCN_GCFG;
1080 
1081 	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
1082 	if (!bcn_nest)
1083 		goto err;
1084 
1085 	if (bcn_tb[DCB_BCN_ATTR_ALL])
1086 		getall = true;
1087 
1088 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1089 		if (!getall && !bcn_tb[i])
1090 			continue;
1091 
1092 		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
1093 		                            &value_byte);
1094 		ret = nla_put_u8(dcbnl_skb, i, value_byte);
1095 		if (ret)
1096 			goto err_bcn;
1097 	}
1098 
1099 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1100 		if (!getall && !bcn_tb[i])
1101 			continue;
1102 
1103 		netdev->dcbnl_ops->getbcncfg(netdev, i,
1104 		                             &value_integer);
1105 		ret = nla_put_u32(dcbnl_skb, i, value_integer);
1106 		if (ret)
1107 			goto err_bcn;
1108 	}
1109 
1110 	nla_nest_end(dcbnl_skb, bcn_nest);
1111 
1112 	nlmsg_end(dcbnl_skb, nlh);
1113 
1114 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
1115 	if (ret)
1116 		goto err_out;
1117 
1118 	return 0;
1119 
1120 err_bcn:
1121 	nla_nest_cancel(dcbnl_skb, bcn_nest);
1122 nlmsg_failure:
1123 err:
1124 	kfree_skb(dcbnl_skb);
1125 err_out:
1126 	ret  = -EINVAL;
1127 	return ret;
1128 }
1129 
1130 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1131                             u32 pid, u32 seq, u16 flags)
1132 {
1133 	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
1134 	int i;
1135 	int ret = -EINVAL;
1136 	u8 value_byte;
1137 	u32 value_int;
1138 
1139 	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
1140 	    !netdev->dcbnl_ops->setbcnrp)
1141 		return ret;
1142 
1143 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
1144 	                       tb[DCB_ATTR_BCN],
1145 	                       dcbnl_bcn_nest);
1146 	if (ret)
1147 		goto err;
1148 
1149 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1150 		if (data[i] == NULL)
1151 			continue;
1152 		value_byte = nla_get_u8(data[i]);
1153 		netdev->dcbnl_ops->setbcnrp(netdev,
1154 			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
1155 	}
1156 
1157 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1158 		if (data[i] == NULL)
1159 			continue;
1160 		value_int = nla_get_u32(data[i]);
1161 		netdev->dcbnl_ops->setbcncfg(netdev,
1162 	                                     i, value_int);
1163 	}
1164 
1165 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
1166 	                  pid, seq, flags);
1167 err:
1168 	return ret;
1169 }
1170 
1171 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1172 				int app_nested_type, int app_info_type,
1173 				int app_entry_type)
1174 {
1175 	struct dcb_peer_app_info info;
1176 	struct dcb_app *table = NULL;
1177 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1178 	u16 app_count;
1179 	int err;
1180 
1181 
1182 	/**
1183 	 * retrieve the peer app configuration from the driver. If the driver
1184 	 * handlers fail, exit without doing anything
1185 	 */
1186 	err = ops->peer_getappinfo(netdev, &info, &app_count);
1187 	if (!err && app_count) {
1188 		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
1189 		if (!table)
1190 			return -ENOMEM;
1191 
1192 		err = ops->peer_getapptable(netdev, table);
1193 	}
1194 
1195 	if (!err) {
1196 		u16 i;
1197 		struct nlattr *app;
1198 
1199 		/**
1200 		 * build the message, from here on the only possible failure
1201 		 * is due to the skb size
1202 		 */
1203 		err = -EMSGSIZE;
1204 
1205 		app = nla_nest_start(skb, app_nested_type);
1206 		if (!app)
1207 			goto nla_put_failure;
1208 
1209 		if (app_info_type)
1210 			NLA_PUT(skb, app_info_type, sizeof(info), &info);
1211 
1212 		for (i = 0; i < app_count; i++)
1213 			NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
1214 				&table[i]);
1215 
1216 		nla_nest_end(skb, app);
1217 	}
1218 	err = 0;
1219 
1220 nla_put_failure:
1221 	kfree(table);
1222 	return err;
1223 }
1224 
1225 /* Handle IEEE 802.1Qaz GET commands. */
1226 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1227 {
1228 	struct nlattr *ieee, *app;
1229 	struct dcb_app_type *itr;
1230 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1231 	int dcbx;
1232 	int err = -EMSGSIZE;
1233 
1234 	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
1235 
1236 	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1237 	if (!ieee)
1238 		goto nla_put_failure;
1239 
1240 	if (ops->ieee_getets) {
1241 		struct ieee_ets ets;
1242 		memset(&ets, 0, sizeof(ets));
1243 		err = ops->ieee_getets(netdev, &ets);
1244 		if (!err)
1245 			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
1246 	}
1247 
1248 	if (ops->ieee_getpfc) {
1249 		struct ieee_pfc pfc;
1250 		memset(&pfc, 0, sizeof(pfc));
1251 		err = ops->ieee_getpfc(netdev, &pfc);
1252 		if (!err)
1253 			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
1254 	}
1255 
1256 	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
1257 	if (!app)
1258 		goto nla_put_failure;
1259 
1260 	spin_lock(&dcb_lock);
1261 	list_for_each_entry(itr, &dcb_app_list, list) {
1262 		if (itr->ifindex == netdev->ifindex) {
1263 			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1264 					 &itr->app);
1265 			if (err) {
1266 				spin_unlock(&dcb_lock);
1267 				goto nla_put_failure;
1268 			}
1269 		}
1270 	}
1271 
1272 	if (netdev->dcbnl_ops->getdcbx)
1273 		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1274 	else
1275 		dcbx = -EOPNOTSUPP;
1276 
1277 	spin_unlock(&dcb_lock);
1278 	nla_nest_end(skb, app);
1279 
1280 	/* get peer info if available */
1281 	if (ops->ieee_peer_getets) {
1282 		struct ieee_ets ets;
1283 		memset(&ets, 0, sizeof(ets));
1284 		err = ops->ieee_peer_getets(netdev, &ets);
1285 		if (!err)
1286 			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
1287 	}
1288 
1289 	if (ops->ieee_peer_getpfc) {
1290 		struct ieee_pfc pfc;
1291 		memset(&pfc, 0, sizeof(pfc));
1292 		err = ops->ieee_peer_getpfc(netdev, &pfc);
1293 		if (!err)
1294 			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
1295 	}
1296 
1297 	if (ops->peer_getappinfo && ops->peer_getapptable) {
1298 		err = dcbnl_build_peer_app(netdev, skb,
1299 					   DCB_ATTR_IEEE_PEER_APP,
1300 					   DCB_ATTR_IEEE_APP_UNSPEC,
1301 					   DCB_ATTR_IEEE_APP);
1302 		if (err)
1303 			goto nla_put_failure;
1304 	}
1305 
1306 	nla_nest_end(skb, ieee);
1307 	if (dcbx >= 0) {
1308 		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1309 		if (err)
1310 			goto nla_put_failure;
1311 	}
1312 
1313 	return 0;
1314 
1315 nla_put_failure:
1316 	return err;
1317 }
1318 
1319 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1320 			     int dir)
1321 {
1322 	u8 pgid, up_map, prio, tc_pct;
1323 	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1324 	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1325 	struct nlattr *pg = nla_nest_start(skb, i);
1326 
1327 	if (!pg)
1328 		goto nla_put_failure;
1329 
1330 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1331 		struct nlattr *tc_nest = nla_nest_start(skb, i);
1332 
1333 		if (!tc_nest)
1334 			goto nla_put_failure;
1335 
1336 		pgid = DCB_ATTR_VALUE_UNDEFINED;
1337 		prio = DCB_ATTR_VALUE_UNDEFINED;
1338 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1339 		up_map = DCB_ATTR_VALUE_UNDEFINED;
1340 
1341 		if (!dir)
1342 			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1343 					  &prio, &pgid, &tc_pct, &up_map);
1344 		else
1345 			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1346 					  &prio, &pgid, &tc_pct, &up_map);
1347 
1348 		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
1349 		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
1350 		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
1351 		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
1352 		nla_nest_end(skb, tc_nest);
1353 	}
1354 
1355 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1356 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1357 
1358 		if (!dir)
1359 			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1360 					   &tc_pct);
1361 		else
1362 			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1363 					   &tc_pct);
1364 		NLA_PUT_U8(skb, i, tc_pct);
1365 	}
1366 	nla_nest_end(skb, pg);
1367 	return 0;
1368 
1369 nla_put_failure:
1370 	return -EMSGSIZE;
1371 }
1372 
1373 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1374 {
1375 	struct nlattr *cee, *app;
1376 	struct dcb_app_type *itr;
1377 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1378 	int dcbx, i, err = -EMSGSIZE;
1379 	u8 value;
1380 
1381 	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
1382 
1383 	cee = nla_nest_start(skb, DCB_ATTR_CEE);
1384 	if (!cee)
1385 		goto nla_put_failure;
1386 
1387 	/* local pg */
1388 	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1389 		err = dcbnl_cee_pg_fill(skb, netdev, 1);
1390 		if (err)
1391 			goto nla_put_failure;
1392 	}
1393 
1394 	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1395 		err = dcbnl_cee_pg_fill(skb, netdev, 0);
1396 		if (err)
1397 			goto nla_put_failure;
1398 	}
1399 
1400 	/* local pfc */
1401 	if (ops->getpfccfg) {
1402 		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
1403 
1404 		if (!pfc_nest)
1405 			goto nla_put_failure;
1406 
1407 		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1408 			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1409 			NLA_PUT_U8(skb, i, value);
1410 		}
1411 		nla_nest_end(skb, pfc_nest);
1412 	}
1413 
1414 	/* local app */
1415 	spin_lock(&dcb_lock);
1416 	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
1417 	if (!app)
1418 		goto dcb_unlock;
1419 
1420 	list_for_each_entry(itr, &dcb_app_list, list) {
1421 		if (itr->ifindex == netdev->ifindex) {
1422 			struct nlattr *app_nest = nla_nest_start(skb,
1423 								 DCB_ATTR_APP);
1424 			if (!app_nest)
1425 				goto dcb_unlock;
1426 
1427 			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1428 					 itr->app.selector);
1429 			if (err)
1430 				goto dcb_unlock;
1431 
1432 			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1433 					  itr->app.protocol);
1434 			if (err)
1435 				goto dcb_unlock;
1436 
1437 			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1438 					 itr->app.priority);
1439 			if (err)
1440 				goto dcb_unlock;
1441 
1442 			nla_nest_end(skb, app_nest);
1443 		}
1444 	}
1445 	nla_nest_end(skb, app);
1446 
1447 	if (netdev->dcbnl_ops->getdcbx)
1448 		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1449 	else
1450 		dcbx = -EOPNOTSUPP;
1451 
1452 	spin_unlock(&dcb_lock);
1453 
1454 	/* feature flags */
1455 	if (ops->getfeatcfg) {
1456 		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
1457 		if (!feat)
1458 			goto nla_put_failure;
1459 
1460 		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1461 		     i++)
1462 			if (!ops->getfeatcfg(netdev, i, &value))
1463 				NLA_PUT_U8(skb, i, value);
1464 
1465 		nla_nest_end(skb, feat);
1466 	}
1467 
1468 	/* peer info if available */
1469 	if (ops->cee_peer_getpg) {
1470 		struct cee_pg pg;
1471 		memset(&pg, 0, sizeof(pg));
1472 		err = ops->cee_peer_getpg(netdev, &pg);
1473 		if (!err)
1474 			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
1475 	}
1476 
1477 	if (ops->cee_peer_getpfc) {
1478 		struct cee_pfc pfc;
1479 		memset(&pfc, 0, sizeof(pfc));
1480 		err = ops->cee_peer_getpfc(netdev, &pfc);
1481 		if (!err)
1482 			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
1483 	}
1484 
1485 	if (ops->peer_getappinfo && ops->peer_getapptable) {
1486 		err = dcbnl_build_peer_app(netdev, skb,
1487 					   DCB_ATTR_CEE_PEER_APP_TABLE,
1488 					   DCB_ATTR_CEE_PEER_APP_INFO,
1489 					   DCB_ATTR_CEE_PEER_APP);
1490 		if (err)
1491 			goto nla_put_failure;
1492 	}
1493 	nla_nest_end(skb, cee);
1494 
1495 	/* DCBX state */
1496 	if (dcbx >= 0) {
1497 		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1498 		if (err)
1499 			goto nla_put_failure;
1500 	}
1501 	return 0;
1502 
1503 dcb_unlock:
1504 	spin_unlock(&dcb_lock);
1505 nla_put_failure:
1506 	return err;
1507 }
1508 
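/* Build an IEEE or CEE state dump for the device and broadcast it to
 * RTNLGRP_DCB listeners.
 */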
1509 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1510 			u32 seq, u32 pid, int dcbx_ver)
1511 {
1512 	struct net *net = dev_net(dev);
1513 	struct sk_buff *skb;
1514 	struct nlmsghdr *nlh;
1515 	struct dcbmsg *dcb;
1516 	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1517 	int err;
1518 
1519 	if (!ops)
1520 		return -EOPNOTSUPP;
1521 
1522 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1523 	if (!skb)
1524 		return -ENOBUFS;
1525 
1526 	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
1527 	if (nlh == NULL) {
1528 		nlmsg_free(skb);
1529 		return -EMSGSIZE;
1530 	}
1531 
1532 	dcb = NLMSG_DATA(nlh);
1533 	dcb->dcb_family = AF_UNSPEC;
1534 	dcb->cmd = cmd;
1535 
1536 	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1537 		err = dcbnl_ieee_fill(skb, dev);
1538 	else
1539 		err = dcbnl_cee_fill(skb, dev);
1540 
1541 	if (err < 0) {
1542 		/* Report error to broadcast listeners */
1543 		nlmsg_cancel(skb, nlh);
1544 		kfree_skb(skb);
1545 		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1546 	} else {
1547 		/* End nlmsg and notify broadcast listeners */
1548 		nlmsg_end(skb, nlh);
1549 		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1550 	}
1551 
1552 	return err;
1553 }
1554 
1555 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1556 		      u32 seq, u32 pid)
1557 {
1558 	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
1559 }
1560 EXPORT_SYMBOL(dcbnl_ieee_notify);
1561 
1562 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1563 		     u32 seq, u32 pid)
1564 {
1565 	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
1566 }
1567 EXPORT_SYMBOL(dcbnl_cee_notify);
1568 
1569 /* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
1570  * be completed, the entire message is aborted and an error value is
1571  * returned. No attempt is made to reconcile the case where only part of
1572  * the command can be completed.
1573  */
1574 static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1575 			  u32 pid, u32 seq, u16 flags)
1576 {
1577 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1578 	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1579 	int err = -EOPNOTSUPP;
1580 
1581 	if (!ops)
1582 		return err;
1583 
1584 	if (!tb[DCB_ATTR_IEEE])
1585 		return -EINVAL;
1586 
1587 	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1588 			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1589 	if (err)
1590 		return err;
1591 
1592 	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1593 		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1594 		err = ops->ieee_setets(netdev, ets);
1595 		if (err)
1596 			goto err;
1597 	}
1598 
1599 	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1600 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1601 		err = ops->ieee_setpfc(netdev, pfc);
1602 		if (err)
1603 			goto err;
1604 	}
1605 
1606 	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1607 		struct nlattr *attr;
1608 		int rem;
1609 
1610 		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1611 			struct dcb_app *app_data;
1612 			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1613 				continue;
1614 			app_data = nla_data(attr);
1615 			if (ops->ieee_setapp)
1616 				err = ops->ieee_setapp(netdev, app_data);
1617 			else
1618 				err = dcb_ieee_setapp(netdev, app_data);
1619 			if (err)
1620 				goto err;
1621 		}
1622 	}
1623 
1624 err:
1625 	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
1626 		    pid, seq, flags);
1627 	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1628 	return err;
1629 }
1630 
1631 static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
1632 			  u32 pid, u32 seq, u16 flags)
1633 {
1634 	struct net *net = dev_net(netdev);
1635 	struct sk_buff *skb;
1636 	struct nlmsghdr *nlh;
1637 	struct dcbmsg *dcb;
1638 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1639 	int err;
1640 
1641 	if (!ops)
1642 		return -EOPNOTSUPP;
1643 
1644 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1645 	if (!skb)
1646 		return -ENOBUFS;
1647 
1648 	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1649 	if (nlh == NULL) {
1650 		nlmsg_free(skb);
1651 		return -EMSGSIZE;
1652 	}
1653 
1654 	dcb = NLMSG_DATA(nlh);
1655 	dcb->dcb_family = AF_UNSPEC;
1656 	dcb->cmd = DCB_CMD_IEEE_GET;
1657 
1658 	err = dcbnl_ieee_fill(skb, netdev);
1659 
1660 	if (err < 0) {
1661 		nlmsg_cancel(skb, nlh);
1662 		kfree_skb(skb);
1663 	} else {
1664 		nlmsg_end(skb, nlh);
1665 		err = rtnl_unicast(skb, net, pid);
1666 	}
1667 
1668 	return err;
1669 }
1670 
1671 static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
1672 			  u32 pid, u32 seq, u16 flags)
1673 {
1674 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1675 	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1676 	int err = -EOPNOTSUPP;
1677 
1678 	if (!ops)
1679 		return -EOPNOTSUPP;
1680 
1681 	if (!tb[DCB_ATTR_IEEE])
1682 		return -EINVAL;
1683 
1684 	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1685 			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1686 	if (err)
1687 		return err;
1688 
1689 	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1690 		struct nlattr *attr;
1691 		int rem;
1692 
1693 		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1694 			struct dcb_app *app_data;
1695 
1696 			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1697 				continue;
1698 			app_data = nla_data(attr);
1699 			if (ops->ieee_delapp)
1700 				err = ops->ieee_delapp(netdev, app_data);
1701 			else
1702 				err = dcb_ieee_delapp(netdev, app_data);
1703 			if (err)
1704 				goto err;
1705 		}
1706 	}
1707 
1708 err:
1709 	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
1710 		    pid, seq, flags);
1711 	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1712 	return err;
1713 }
1714 
1715 
1716 /* DCBX configuration */
1717 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
1718 			 u32 pid, u32 seq, u16 flags)
1719 {
1720 	int ret;
1721 
1722 	if (!netdev->dcbnl_ops->getdcbx)
1723 		return -EOPNOTSUPP;
1724 
1725 	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
1726 			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
1727 
1728 	return ret;
1729 }
1730 
1731 static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1732 			 u32 pid, u32 seq, u16 flags)
1733 {
1734 	int ret;
1735 	u8 value;
1736 
1737 	if (!netdev->dcbnl_ops->setdcbx)
1738 		return -EOPNOTSUPP;
1739 
1740 	if (!tb[DCB_ATTR_DCBX])
1741 		return -EINVAL;
1742 
1743 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1744 
1745 	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
1746 			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
1747 			  pid, seq, flags);
1748 
1749 	return ret;
1750 }
1751 
1752 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1753 			    u32 pid, u32 seq, u16 flags)
1754 {
1755 	struct sk_buff *dcbnl_skb;
1756 	struct nlmsghdr *nlh;
1757 	struct dcbmsg *dcb;
1758 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1759 	u8 value;
1760 	int ret, i;
1761 	int getall = 0;
1762 
1763 	if (!netdev->dcbnl_ops->getfeatcfg)
1764 		return -EOPNOTSUPP;
1765 
1766 	if (!tb[DCB_ATTR_FEATCFG])
1767 		return -EINVAL;
1768 
1769 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1770 			       dcbnl_featcfg_nest);
1771 	if (ret)
1772 		goto err_out;
1773 
1774 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1775 	if (!dcbnl_skb) {
1776 		ret = -ENOBUFS;
1777 		goto err_out;
1778 	}
1779 
1780 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1781 
1782 	dcb = NLMSG_DATA(nlh);
1783 	dcb->dcb_family = AF_UNSPEC;
1784 	dcb->cmd = DCB_CMD_GFEATCFG;
1785 
1786 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
1787 	if (!nest) {
1788 		ret = -EMSGSIZE;
1789 		goto nla_put_failure;
1790 	}
1791 
1792 	if (data[DCB_FEATCFG_ATTR_ALL])
1793 		getall = 1;
1794 
1795 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1796 		if (!getall && !data[i])
1797 			continue;
1798 
1799 		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1800 		if (!ret)
1801 			ret = nla_put_u8(dcbnl_skb, i, value);
1802 
1803 		if (ret) {
1804 			nla_nest_cancel(dcbnl_skb, nest);
1805 			goto nla_put_failure;
1806 		}
1807 	}
1808 	nla_nest_end(dcbnl_skb, nest);
1809 
1810 	nlmsg_end(dcbnl_skb, nlh);
1811 
1812 	return rtnl_unicast(dcbnl_skb, &init_net, pid);
1813 nla_put_failure:
1814 	nlmsg_cancel(dcbnl_skb, nlh);
1815 nlmsg_failure:
1816 	kfree_skb(dcbnl_skb);
1817 err_out:
1818 	return ret;
1819 }
1820 
1821 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1822 			    u32 pid, u32 seq, u16 flags)
1823 {
1824 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1825 	int ret, i;
1826 	u8 value;
1827 
1828 	if (!netdev->dcbnl_ops->setfeatcfg)
1829 		return -EOPNOTSUPP;
1830 
1831 	if (!tb[DCB_ATTR_FEATCFG])
1832 		return -EINVAL;
1833 
1834 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1835 			       dcbnl_featcfg_nest);
1836 
1837 	if (ret)
1838 		goto err;
1839 
1840 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1841 		if (data[i] == NULL)
1842 			continue;
1843 
1844 		value = nla_get_u8(data[i]);
1845 
1846 		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1847 
1848 		if (ret)
1849 			goto err;
1850 	}
1851 err:
1852 	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1853 		    pid, seq, flags);
1854 
1855 	return ret;
1856 }
1857 
1858 /* Handle CEE DCBX GET commands. */
1859 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
1860 			 u32 pid, u32 seq, u16 flags)
1861 {
1862 	struct net *net = dev_net(netdev);
1863 	struct sk_buff *skb;
1864 	struct nlmsghdr *nlh;
1865 	struct dcbmsg *dcb;
1866 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1867 	int err;
1868 
1869 	if (!ops)
1870 		return -EOPNOTSUPP;
1871 
1872 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1873 	if (!skb)
1874 		return -ENOBUFS;
1875 
1876 	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1877 	if (nlh == NULL) {
1878 		nlmsg_free(skb);
1879 		return -EMSGSIZE;
1880 	}
1881 
1882 	dcb = NLMSG_DATA(nlh);
1883 	dcb->dcb_family = AF_UNSPEC;
1884 	dcb->cmd = DCB_CMD_CEE_GET;
1885 
1886 	err = dcbnl_cee_fill(skb, netdev);
1887 
1888 	if (err < 0) {
1889 		nlmsg_cancel(skb, nlh);
1890 		nlmsg_free(skb);
1891 	} else {
1892 		nlmsg_end(skb, nlh);
1893 		err = rtnl_unicast(skb, net, pid);
1894 	}
1895 	return err;
1896 }
1897 
1898 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1899 {
1900 	struct net *net = sock_net(skb->sk);
1901 	struct net_device *netdev;
1902 	struct dcbmsg  *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
1903 	struct nlattr *tb[DCB_ATTR_MAX + 1];
1904 	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1905 	int ret = -EINVAL;
1906 
1907 	if (!net_eq(net, &init_net))
1908 		return -EINVAL;
1909 
1910 	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1911 			  dcbnl_rtnl_policy);
1912 	if (ret < 0)
1913 		return ret;
1914 
1915 	if (!tb[DCB_ATTR_IFNAME])
1916 		return -EINVAL;
1917 
1918 	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1919 	if (!netdev)
1920 		return -EINVAL;
1921 
1922 	if (!netdev->dcbnl_ops)
1923 		goto errout;
1924 
1925 	switch (dcb->cmd) {
1926 	case DCB_CMD_GSTATE:
1927 		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1928 		                     nlh->nlmsg_flags);
1929 		goto out;
1930 	case DCB_CMD_PFC_GCFG:
1931 		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1932 		                      nlh->nlmsg_flags);
1933 		goto out;
1934 	case DCB_CMD_GPERM_HWADDR:
1935 		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1936 		                           nlh->nlmsg_flags);
1937 		goto out;
1938 	case DCB_CMD_PGTX_GCFG:
1939 		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1940 		                        nlh->nlmsg_flags);
1941 		goto out;
1942 	case DCB_CMD_PGRX_GCFG:
1943 		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1944 		                        nlh->nlmsg_flags);
1945 		goto out;
1946 	case DCB_CMD_BCN_GCFG:
1947 		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1948 		                       nlh->nlmsg_flags);
1949 		goto out;
1950 	case DCB_CMD_SSTATE:
1951 		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1952 		                     nlh->nlmsg_flags);
1953 		goto out;
1954 	case DCB_CMD_PFC_SCFG:
1955 		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1956 		                      nlh->nlmsg_flags);
1957 		goto out;
1958 
1959 	case DCB_CMD_SET_ALL:
1960 		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
1961 		                   nlh->nlmsg_flags);
1962 		goto out;
1963 	case DCB_CMD_PGTX_SCFG:
1964 		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1965 		                        nlh->nlmsg_flags);
1966 		goto out;
1967 	case DCB_CMD_PGRX_SCFG:
1968 		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1969 		                        nlh->nlmsg_flags);
1970 		goto out;
1971 	case DCB_CMD_GCAP:
1972 		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
1973 		                   nlh->nlmsg_flags);
1974 		goto out;
1975 	case DCB_CMD_GNUMTCS:
1976 		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1977 		                      nlh->nlmsg_flags);
1978 		goto out;
1979 	case DCB_CMD_SNUMTCS:
1980 		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1981 		                      nlh->nlmsg_flags);
1982 		goto out;
1983 	case DCB_CMD_PFC_GSTATE:
1984 		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1985 		                        nlh->nlmsg_flags);
1986 		goto out;
1987 	case DCB_CMD_PFC_SSTATE:
1988 		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1989 		                        nlh->nlmsg_flags);
1990 		goto out;
1991 	case DCB_CMD_BCN_SCFG:
1992 		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1993 		                       nlh->nlmsg_flags);
1994 		goto out;
1995 	case DCB_CMD_GAPP:
1996 		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
1997 		                   nlh->nlmsg_flags);
1998 		goto out;
1999 	case DCB_CMD_SAPP:
2000 		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
2001 		                   nlh->nlmsg_flags);
2002 		goto out;
2003 	case DCB_CMD_IEEE_SET:
2004 		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
2005 				     nlh->nlmsg_flags);
2006 		goto out;
2007 	case DCB_CMD_IEEE_GET:
2008 		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
2009 				     nlh->nlmsg_flags);
2010 		goto out;
2011 	case DCB_CMD_IEEE_DEL:
2012 		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
2013 				     nlh->nlmsg_flags);
2014 		goto out;
2015 	case DCB_CMD_GDCBX:
2016 		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2017 				    nlh->nlmsg_flags);
2018 		goto out;
2019 	case DCB_CMD_SDCBX:
2020 		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2021 				    nlh->nlmsg_flags);
2022 		goto out;
2023 	case DCB_CMD_GFEATCFG:
2024 		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2025 				       nlh->nlmsg_flags);
2026 		goto out;
2027 	case DCB_CMD_SFEATCFG:
2028 		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2029 				       nlh->nlmsg_flags);
2030 		goto out;
2031 	case DCB_CMD_CEE_GET:
2032 		ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
2033 				    nlh->nlmsg_flags);
2034 		goto out;
2035 	default:
2036 		goto errout;
2037 	}
2038 errout:
2039 	ret = -EINVAL;
2040 out:
2041 	dev_put(netdev);
2042 	return ret;
2043 }
2044 
2045 /**
2046  * dcb_getapp - retrieve the DCBX application user priority
2047  *
2048  * On success returns a non-zero 802.1p user priority bitmap;
2049  * otherwise returns 0, the invalid user priority bitmap, to
2050  * indicate an error.
2051  */
2052 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2053 {
2054 	struct dcb_app_type *itr;
2055 	u8 prio = 0;
2056 
2057 	spin_lock(&dcb_lock);
2058 	list_for_each_entry(itr, &dcb_app_list, list) {
2059 		if (itr->app.selector == app->selector &&
2060 		    itr->app.protocol == app->protocol &&
2061 		    itr->ifindex == dev->ifindex) {
2062 			prio = itr->app.priority;
2063 			break;
2064 		}
2065 	}
2066 	spin_unlock(&dcb_lock);
2067 
2068 	return prio;
2069 }
2070 EXPORT_SYMBOL(dcb_getapp);
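
/*
 * Illustrative sketch, not part of dcbnl: one way a driver could use
 * dcb_getapp() to look up the CEE user priority map configured for FCoE
 * traffic.  The helper name and the choice of ETH_P_FCOE are assumptions
 * made for this example only.
 */
static u8 __maybe_unused example_fcoe_getapp(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
	};

	/* Returns the stored user priority bitmap, or 0 if none is set. */
	return dcb_getapp(dev, &app);
}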
2071 
2072 /**
2073  * dcb_setapp - add CEE dcb application data to app list
2074  *
2075  * Priority 0 is an invalid priority in the CEE spec. This routine
2076  * removes applications from the app list if the priority is
2077  * set to zero.
2078  */
2079 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2080 {
2081 	struct dcb_app_type *itr;
2082 	struct dcb_app_type event;
2083 
2084 	event.ifindex = dev->ifindex;
2085 	memcpy(&event.app, new, sizeof(event.app));
2086 	if (dev->dcbnl_ops->getdcbx)
2087 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2088 
2089 	spin_lock(&dcb_lock);
2090 	/* Search for existing match and replace */
2091 	list_for_each_entry(itr, &dcb_app_list, list) {
2092 		if (itr->app.selector == new->selector &&
2093 		    itr->app.protocol == new->protocol &&
2094 		    itr->ifindex == dev->ifindex) {
2095 			if (new->priority) {
2096 				itr->app.priority = new->priority;
2097 			} else {
2098 				list_del(&itr->list);
2099 				kfree(itr);
2100 			}
2101 			goto out;
2102 		}
2103 	}
2104 	/* App type does not exist; add new application type */
2105 	if (new->priority) {
2106 		struct dcb_app_type *entry;
2107 		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2108 		if (!entry) {
2109 			spin_unlock(&dcb_lock);
2110 			return -ENOMEM;
2111 		}
2112 
2113 		memcpy(&entry->app, new, sizeof(*new));
2114 		entry->ifindex = dev->ifindex;
2115 		list_add(&entry->list, &dcb_app_list);
2116 	}
2117 out:
2118 	spin_unlock(&dcb_lock);
2119 	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2120 	return 0;
2121 }
2122 EXPORT_SYMBOL(dcb_setapp);
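
/*
 * Illustrative sketch, not part of dcbnl: dcb_setapp() adds or replaces a
 * CEE APP entry, and calling it again with priority 0 removes the entry,
 * matching the behaviour described above.  The helper name is an
 * assumption made for this example only.
 */
static int __maybe_unused example_cee_setapp(struct net_device *dev,
					     u16 ethertype, u8 up_map)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ethertype,
		.priority = up_map,	/* CEE stores a user priority bitmap */
	};
	int err;

	err = dcb_setapp(dev, &app);	/* add or replace the entry */
	if (err)
		return err;

	app.priority = 0;		/* priority 0 deletes the entry */
	return dcb_setapp(dev, &app);
}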
2123 
2124 /**
2125  * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
2126  *
2127  * Helper routine which on success returns a non-zero 802.1Qaz user
2128  * priority bitmap; otherwise returns 0 to indicate the dcb_app was
2129  * not found in the APP list.
2130  */
2131 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2132 {
2133 	struct dcb_app_type *itr;
2134 	u8 prio = 0;
2135 
2136 	spin_lock(&dcb_lock);
2137 	list_for_each_entry(itr, &dcb_app_list, list) {
2138 		if (itr->app.selector == app->selector &&
2139 		    itr->app.protocol == app->protocol &&
2140 		    itr->ifindex == dev->ifindex) {
2141 			prio |= 1 << itr->app.priority;
2142 		}
2143 	}
2144 	spin_unlock(&dcb_lock);
2145 
2146 	return prio;
2147 }
2148 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
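
/*
 * Illustrative sketch, not part of dcbnl: turning the 802.1Qaz user
 * priority bitmap returned by dcb_ieee_getapp_mask() into a single
 * priority.  The helper name and the "highest set bit wins" policy are
 * assumptions made for this example only.
 */
static int __maybe_unused example_ieee_app_prio(struct net_device *dev,
						u16 ethertype)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ethertype,
	};
	u8 mask = dcb_ieee_getapp_mask(dev, &app);

	if (!mask)
		return -ENOENT;		/* no APP entry for this ethertype */

	return fls(mask) - 1;		/* highest configured priority */
}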
2149 
2150 /**
2151  * dcb_ieee_setapp - add IEEE dcb application data to app list
2152  *
2153  * This adds application data to the list. Multiple application
2154  * entries may exist for the same selector and protocol as long
2155  * as the priorities are different.
2156  */
2157 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2158 {
2159 	struct dcb_app_type *itr, *entry;
2160 	struct dcb_app_type event;
2161 	int err = 0;
2162 
2163 	event.ifindex = dev->ifindex;
2164 	memcpy(&event.app, new, sizeof(event.app));
2165 	if (dev->dcbnl_ops->getdcbx)
2166 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2167 
2168 	spin_lock(&dcb_lock);
2169 	/* Search for existing match and abort if found */
2170 	list_for_each_entry(itr, &dcb_app_list, list) {
2171 		if (itr->app.selector == new->selector &&
2172 		    itr->app.protocol == new->protocol &&
2173 		    itr->app.priority == new->priority &&
2174 		    itr->ifindex == dev->ifindex) {
2175 			err = -EEXIST;
2176 			goto out;
2177 		}
2178 	}
2179 
2180 	/* App entry does not exist; add new entry */
2181 	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2182 	if (!entry) {
2183 		err = -ENOMEM;
2184 		goto out;
2185 	}
2186 
2187 	memcpy(&entry->app, new, sizeof(*new));
2188 	entry->ifindex = dev->ifindex;
2189 	list_add(&entry->list, &dcb_app_list);
2190 out:
2191 	spin_unlock(&dcb_lock);
2192 	if (!err)
2193 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2194 	return err;
2195 }
2196 EXPORT_SYMBOL(dcb_ieee_setapp);
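
/*
 * Illustrative sketch, not part of dcbnl: adding an 802.1Qaz APP entry
 * with dcb_ieee_setapp().  Repeating the call with an identical
 * selector/protocol/priority tuple fails with -EEXIST, as documented
 * above; this example simply treats that case as success.  The helper
 * name is an assumption made for this example only.
 */
static int __maybe_unused example_ieee_setapp(struct net_device *dev,
					      u16 ethertype, u8 prio)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ethertype,
		.priority = prio,
	};
	int err = dcb_ieee_setapp(dev, &app);

	return err == -EEXIST ? 0 : err;
}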
2197 
2198 /**
2199  * dcb_ieee_delapp - delete IEEE dcb application data from list
2200  *
2201  * This removes matching APP data from the APP list.
2202  */
2203 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2204 {
2205 	struct dcb_app_type *itr;
2206 	struct dcb_app_type event;
2207 	int err = -ENOENT;
2208 
2209 	event.ifindex = dev->ifindex;
2210 	memcpy(&event.app, del, sizeof(event.app));
2211 	if (dev->dcbnl_ops->getdcbx)
2212 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2213 
2214 	spin_lock(&dcb_lock);
2215 	/* Search for existing match and remove it. */
2216 	list_for_each_entry(itr, &dcb_app_list, list) {
2217 		if (itr->app.selector == del->selector &&
2218 		    itr->app.protocol == del->protocol &&
2219 		    itr->app.priority == del->priority &&
2220 		    itr->ifindex == dev->ifindex) {
2221 			list_del(&itr->list);
2222 			kfree(itr);
2223 			err = 0;
2224 			goto out;
2225 		}
2226 	}
2227 
2228 out:
2229 	spin_unlock(&dcb_lock);
2230 	if (!err)
2231 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2232 	return err;
2233 }
2234 EXPORT_SYMBOL(dcb_ieee_delapp);
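
/*
 * Illustrative sketch, not part of dcbnl: dcb_ieee_delapp() matches on
 * the full selector/protocol/priority tuple and returns -ENOENT when no
 * such entry exists.  The helper name is an assumption made for this
 * example only.
 */
static int __maybe_unused example_ieee_delapp(struct net_device *dev,
					      u16 ethertype, u8 prio)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ethertype,
		.priority = prio,
	};

	return dcb_ieee_delapp(dev, &app);
}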
2235 
2236 static void dcb_flushapp(void)
2237 {
2238 	struct dcb_app_type *app;
2239 	struct dcb_app_type *tmp;
2240 
2241 	spin_lock(&dcb_lock);
2242 	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
2243 		list_del(&app->list);
2244 		kfree(app);
2245 	}
2246 	spin_unlock(&dcb_lock);
2247 }
2248 
2249 static int __init dcbnl_init(void)
2250 {
2251 	INIT_LIST_HEAD(&dcb_app_list);
2252 
2253 	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
2254 	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);
2255 
2256 	return 0;
2257 }
2258 module_init(dcbnl_init);
2259 
2260 static void __exit dcbnl_exit(void)
2261 {
2262 	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
2263 	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
2264 	dcb_flushapp();
2265 }
2266 module_exit(dcbnl_exit);
2267