1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2008-2011, Intel Corporation.
5 * Description: Data Center Bridging netlink interface
6 * Author: Lucy Liu <lucy.liu@intel.com>
9 #include <linux/netdevice.h>
10 #include <linux/netlink.h>
11 #include <linux/slab.h>
12 #include <net/netlink.h>
13 #include <net/rtnetlink.h>
14 #include <linux/dcbnl.h>
15 #include <net/dcbevent.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/init.h>
20 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
21 * intended to allow network traffic with differing requirements
22 * (highly reliable, no drops vs. best effort vs. low latency) to operate
23 * and co-exist on Ethernet. Current DCB features are:
25 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
26 * framework for assigning bandwidth guarantees to traffic classes.
28 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
29 * can work independently for each 802.1p priority.
31 * Congestion Notification - provides a mechanism for end-to-end congestion
32 * control for protocols which do not have built-in congestion management.
34 * More information about the emerging standards for these Ethernet features
35 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
37 * This file implements an rtnetlink interface to allow configuration of DCB
38 * features for capable devices.
41 /**************** DCB attribute policies *************************************/
43 /* DCB netlink attributes policy */
/* Top-level attribute policy: validates the attributes of incoming
 * RTM_GETDCB/RTM_SETDCB messages before they are dispatched to the
 * per-command handlers below.  Nested payloads are validated again by the
 * per-nest policies that follow. */
44 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
45 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
46 [DCB_ATTR_STATE] = {.type = NLA_U8},
47 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
48 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
49 [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
50 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
51 [DCB_ATTR_CAP] = {.type = NLA_NESTED},
52 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
53 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
54 [DCB_ATTR_APP] = {.type = NLA_NESTED},
55 [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
56 [DCB_ATTR_DCBX] = {.type = NLA_U8},
57 [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
60 /* DCB priority flow control to User Priority nested attributes */
/* One u8 per 802.1p user priority (0..7) plus an ALL flag that requests
 * every priority at once; used inside DCB_ATTR_PFC_CFG. */
61 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
62 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
63 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
64 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
65 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
66 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
67 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
68 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
69 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
70 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
73 /* DCB priority grouping nested attributes */
/* Per-traffic-class nested parameter blocks (validated by
 * dcbnl_tc_param_nest) plus per-group bandwidth percentages (u8); used
 * inside DCB_ATTR_PG_CFG for both the Tx and Rx directions. */
74 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
75 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
76 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
77 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
78 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
79 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
80 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
81 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
82 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
83 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
84 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
85 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
86 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
87 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
88 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
89 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
90 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
91 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
92 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
95 /* DCB traffic class nested attributes. */
/* Parameters of a single traffic class inside a DCB_PG_ATTR_TC_* nest:
 * priority-group id, UP-to-TC mapping, strict priority and bandwidth %. */
96 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
97 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
98 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
99 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
100 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
101 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
104 /* DCB capabilities nested attributes. */
/* Queried via DCB_ATTR_CAP; each u8 answers one ->getcap() capability id. */
105 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
106 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
107 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
108 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
109 [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
110 [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
111 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
112 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
113 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
114 [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
117 /* DCB capabilities nested attributes. */
/* NOTE(review): the banner above is a stale copy-paste — this policy covers
 * the DCB *number of traffic classes* nest (DCB_ATTR_NUMTCS), with one u8
 * count per feature (PG, PFC) plus an ALL flag. */
118 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
119 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
120 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
121 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
124 /* DCB BCN nested attributes. */
/* Backward Congestion Notification nest (DCB_ATTR_BCN): eight u8
 * per-priority RP enables plus a set of u32 configuration words
 * (BCNA_0..RI); ALL flags request everything at once. */
125 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
126 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
127 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
128 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
129 [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
130 [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
131 [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
132 [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
133 [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
134 [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
135 [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
136 [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
137 [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
138 [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
139 [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
140 [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
141 [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
142 [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
143 [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
144 [DCB_BCN_ATTR_W] = {.type = NLA_U32},
145 [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
146 [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
147 [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
148 [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
149 [DCB_BCN_ATTR_C] = {.type = NLA_U32},
150 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
153 /* DCB APP nested attributes. */
/* CEE application-priority nest (DCB_ATTR_APP): id type (ethertype or
 * port number), 16-bit protocol id, and the assigned 802.1p priority. */
154 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
155 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
156 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
157 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
160 /* IEEE 802.1Qaz nested attributes. */
/* DCB_ATTR_IEEE nest: fixed-size binary structs (ETS, PFC, maxrate, QCN,
 * buffer) validated by exact length, plus nested APP and app-trust tables. */
161 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
162 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
163 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
164 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
165 [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
166 [DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
167 [DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
168 [DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
169 [DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
172 /* DCB number of traffic classes nested attributes. */
/* NOTE(review): the banner above is a stale copy-paste — this is the
 * DCB *feature configuration* nest (DCB_ATTR_FEATCFG): one u8 state per
 * feature (PG, PFC, APP) plus an ALL flag. */
173 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
174 [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
175 [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
176 [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
177 [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
/* Global table of dcb_app entries (matched by ifindex when dumped in
 * dcbnl_ieee_fill()/dcbnl_cee_fill()); all accesses are serialized with
 * dcb_lock taken in BH-disabling mode (spin_lock_bh). */
180 static LIST_HEAD(dcb_app_list);
181 static DEFINE_SPINLOCK(dcb_lock);
/* Map an APP-entry selector to the netlink attribute type that carries it:
 * the IEEE 802.1Qaz selectors map to DCB_ATTR_IEEE_APP, the non-standard
 * PCP selector maps to DCB_ATTR_DCB_APP, and any unknown selector yields
 * DCB_ATTR_IEEE_APP_UNSPEC (invalid). */
183 static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector)
186 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
187 case IEEE_8021QAZ_APP_SEL_STREAM:
188 case IEEE_8021QAZ_APP_SEL_DGRAM:
189 case IEEE_8021QAZ_APP_SEL_ANY:
190 case IEEE_8021QAZ_APP_SEL_DSCP:
191 return DCB_ATTR_IEEE_APP;
192 case DCB_APP_SEL_PCP:
193 return DCB_ATTR_DCB_APP;
195 return DCB_ATTR_IEEE_APP_UNSPEC;
/* Return true only for the two attribute types that may legally carry an
 * APP entry (DCB_ATTR_IEEE_APP or DCB_ATTR_DCB_APP). */
199 static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type)
202 case DCB_ATTR_IEEE_APP:
203 case DCB_ATTR_DCB_APP:
/* Check that a selector value is consistent with the attribute type it
 * arrived in, i.e. the selector maps back to the same attribute type. */
210 static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector)
212 return dcbnl_app_attr_type_get(selector) == type;
/* Allocate a default-sized netlink skb and start a dcbmsg (AF_UNSPEC)
 * header of the given type/cmd in it.  On success the nlmsghdr is
 * returned through *nlhp so the caller can nlmsg_end() later. */
215 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
216 u32 flags, struct nlmsghdr **nlhp)
220 struct nlmsghdr *nlh;
222 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
226 nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
229 dcb = nlmsg_data(nlh);
230 dcb->dcb_family = AF_UNSPEC;
/* DCB_CMD_GSTATE: report the device's DCB enable state as a single
 * DCB_ATTR_STATE u8 obtained from ->getstate(). */
240 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
241 u32 seq, struct nlattr **tb, struct sk_buff *skb)
243 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
244 if (!netdev->dcbnl_ops->getstate)
247 return nla_put_u8(skb, DCB_ATTR_STATE,
248 netdev->dcbnl_ops->getstate(netdev));
/* DCB_CMD_PFC_GCFG: dump the PFC enable value for each user priority the
 * request selected (or all of them when DCB_PFC_UP_ATTR_ALL is present),
 * querying the driver via ->getpfccfg() per priority. */
251 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
252 u32 seq, struct nlattr **tb, struct sk_buff *skb)
254 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
260 if (!tb[DCB_ATTR_PFC_CFG])
263 if (!netdev->dcbnl_ops->getpfccfg)
266 ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
267 tb[DCB_ATTR_PFC_CFG],
268 dcbnl_pfc_up_nest, NULL)
272 nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
276 if (data[DCB_PFC_UP_ATTR_ALL])
279 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
280 if (!getall && !data[i])
283 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
285 ret = nla_put_u8(skb, i, value);
287 nla_nest_cancel(skb, nest);
291 nla_nest_end(skb, nest);
/* DCB_CMD_GPERM_HWADDR: return the permanent MAC address from
 * ->getpermhwaddr() as a fixed MAX_ADDR_LEN blob (zero-padded, since the
 * buffer is cleared before the driver fills it). */
296 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
297 u32 seq, struct nlattr **tb, struct sk_buff *skb)
299 u8 perm_addr[MAX_ADDR_LEN];
301 if (!netdev->dcbnl_ops->getpermhwaddr)
304 memset(perm_addr, 0, sizeof(perm_addr));
305 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
307 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
/* DCB_CMD_GCAP: report the device capabilities the request asked for
 * (or all, when DCB_CAP_ATTR_ALL is present); each id is queried via
 * ->getcap() and only successful queries are put into the reply nest. */
310 static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
311 u32 seq, struct nlattr **tb, struct sk_buff *skb)
313 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
319 if (!tb[DCB_ATTR_CAP])
322 if (!netdev->dcbnl_ops->getcap)
325 ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
326 tb[DCB_ATTR_CAP], dcbnl_cap_nest,
331 nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
335 if (data[DCB_CAP_ATTR_ALL])
338 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
339 if (!getall && !data[i])
342 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
343 ret = nla_put_u8(skb, i, value);
345 nla_nest_cancel(skb, nest);
350 nla_nest_end(skb, nest);
/* DCB_CMD_GNUMTCS: report the number of traffic classes supported for the
 * requested feature ids (PG/PFC), or for all when DCB_NUMTCS_ATTR_ALL is
 * present, via per-id ->getnumtcs() calls. */
355 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
356 u32 seq, struct nlattr **tb, struct sk_buff *skb)
358 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
364 if (!tb[DCB_ATTR_NUMTCS])
367 if (!netdev->dcbnl_ops->getnumtcs)
370 ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
372 dcbnl_numtcs_nest, NULL);
376 nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
380 if (data[DCB_NUMTCS_ATTR_ALL])
383 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
384 if (!getall && !data[i])
387 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
389 ret = nla_put_u8(skb, i, value);
391 nla_nest_cancel(skb, nest);
397 nla_nest_end(skb, nest);
/* DCB_CMD_SNUMTCS: apply the supplied per-feature traffic-class counts
 * through ->setnumtcs(); the reply's DCB_ATTR_NUMTCS byte carries the
 * (boolean-collapsed) status of the last driver call. */
402 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
403 u32 seq, struct nlattr **tb, struct sk_buff *skb)
405 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
410 if (!tb[DCB_ATTR_NUMTCS])
413 if (!netdev->dcbnl_ops->setnumtcs)
416 ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
418 dcbnl_numtcs_nest, NULL);
422 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
426 value = nla_get_u8(data[i]);
428 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
433 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
/* DCB_CMD_PFC_GSTATE: report the PFC enable state as one u8 from
 * ->getpfcstate(). */
436 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
437 u32 seq, struct nlattr **tb, struct sk_buff *skb)
439 if (!netdev->dcbnl_ops->getpfcstate)
442 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
443 netdev->dcbnl_ops->getpfcstate(netdev));
/* DCB_CMD_PFC_SSTATE: push the requested PFC enable state to the driver
 * via ->setpfcstate() and always acknowledge with status 0. */
446 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
447 u32 seq, struct nlattr **tb, struct sk_buff *skb)
451 if (!tb[DCB_ATTR_PFC_STATE])
454 if (!netdev->dcbnl_ops->setpfcstate)
457 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
459 netdev->dcbnl_ops->setpfcstate(netdev, value);
461 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
/* DCB_CMD_GAPP: look up the priority for an (idtype, id) application
 * entry.  Prefers the driver's ->getapp() hook; otherwise falls back to
 * the generic dcb_getapp() table.  Replies with a DCB_ATTR_APP nest
 * echoing idtype/id plus the resulting priority. */
464 static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
465 u32 seq, struct nlattr **tb, struct sk_buff *skb)
467 struct nlattr *app_nest;
468 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
473 if (!tb[DCB_ATTR_APP])
476 ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
477 tb[DCB_ATTR_APP], dcbnl_app_nest,
482 /* all must be non-null */
483 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
484 (!app_tb[DCB_APP_ATTR_ID]))
487 /* either by eth type or by socket number */
488 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
489 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
490 (idtype != DCB_APP_IDTYPE_PORTNUM))
493 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
495 if (netdev->dcbnl_ops->getapp) {
496 ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
502 struct dcb_app app = {
506 up = dcb_getapp(netdev, &app);
509 app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
513 ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
517 ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
521 ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
525 nla_nest_end(skb, app_nest);
530 nla_nest_cancel(skb, app_nest);
/* DCB_CMD_SAPP: set the priority for an (idtype, id) application entry,
 * via the driver's ->setapp() when available or the generic dcb_setapp()
 * table otherwise, then send a CEE notification for the change. */
534 static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
535 u32 seq, struct nlattr **tb, struct sk_buff *skb)
540 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
542 if (!tb[DCB_ATTR_APP])
545 ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
546 tb[DCB_ATTR_APP], dcbnl_app_nest,
551 /* all must be non-null */
552 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
553 (!app_tb[DCB_APP_ATTR_ID]) ||
554 (!app_tb[DCB_APP_ATTR_PRIORITY]))
557 /* either by eth type or by socket number */
558 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
559 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
560 (idtype != DCB_APP_IDTYPE_PORTNUM))
563 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
564 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
566 if (netdev->dcbnl_ops->setapp) {
567 ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
572 app.selector = idtype;
575 ret = dcb_setapp(netdev, &app);
578 ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
579 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
/* Common worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.
 * Walks the requested per-TC nests and BW-group ids (or all of them when
 * the matching *_ALL attribute is present), queries the driver's
 * direction-specific getters, and builds the DCB_ATTR_PG_CFG reply nest.
 * dir: Tx = 0, Rx = 1 (selects the ...rx vs ...tx driver callbacks). */
584 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
585 struct nlattr **tb, struct sk_buff *skb, int dir)
587 struct nlattr *pg_nest, *param_nest, *data;
588 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
589 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
590 u8 prio, pgid, tc_pct, up_map;
595 if (!tb[DCB_ATTR_PG_CFG])
598 if (!netdev->dcbnl_ops->getpgtccfgtx ||
599 !netdev->dcbnl_ops->getpgtccfgrx ||
600 !netdev->dcbnl_ops->getpgbwgcfgtx ||
601 !netdev->dcbnl_ops->getpgbwgcfgrx)
604 ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
605 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
610 pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
614 if (pg_tb[DCB_PG_ATTR_TC_ALL])
617 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
618 if (!getall && !pg_tb[i])
/* With TC_ALL, the single TC_ALL nest supplies the parameter filter
 * for every traffic class. */
621 if (pg_tb[DCB_PG_ATTR_TC_ALL])
622 data = pg_tb[DCB_PG_ATTR_TC_ALL];
625 ret = nla_parse_nested_deprecated(param_tb,
626 DCB_TC_ATTR_PARAM_MAX, data,
627 dcbnl_tc_param_nest, NULL);
631 param_nest = nla_nest_start_noflag(skb, i);
/* Defaults in case the driver leaves an output untouched. */
635 pgid = DCB_ATTR_VALUE_UNDEFINED;
636 prio = DCB_ATTR_VALUE_UNDEFINED;
637 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
638 up_map = DCB_ATTR_VALUE_UNDEFINED;
642 netdev->dcbnl_ops->getpgtccfgrx(netdev,
643 i - DCB_PG_ATTR_TC_0, &prio,
644 &pgid, &tc_pct, &up_map);
647 netdev->dcbnl_ops->getpgtccfgtx(netdev,
648 i - DCB_PG_ATTR_TC_0, &prio,
649 &pgid, &tc_pct, &up_map);
652 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
653 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
654 ret = nla_put_u8(skb,
655 DCB_TC_ATTR_PARAM_PGID, pgid);
659 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
660 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
661 ret = nla_put_u8(skb,
662 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
666 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
667 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
668 ret = nla_put_u8(skb,
669 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
673 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
674 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
675 ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
680 nla_nest_end(skb, param_nest);
683 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
688 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
689 if (!getall && !pg_tb[i])
692 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
696 netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
697 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
700 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
701 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
703 ret = nla_put_u8(skb, i, tc_pct);
708 nla_nest_end(skb, pg_nest);
713 nla_nest_cancel(skb, param_nest);
715 nla_nest_cancel(skb, pg_nest);
/* DCB_CMD_PGTX_GCFG: Tx direction (dir = 0) of the PG-config dump. */
720 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
721 u32 seq, struct nlattr **tb, struct sk_buff *skb)
723 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
/* DCB_CMD_PGRX_GCFG: Rx direction (dir = 1) of the PG-config dump. */
726 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
727 u32 seq, struct nlattr **tb, struct sk_buff *skb)
729 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
/* DCB_CMD_SSTATE: set the DCB enable state via ->setstate(); the reply's
 * DCB_ATTR_STATE byte carries the driver's return value. */
732 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
733 u32 seq, struct nlattr **tb, struct sk_buff *skb)
737 if (!tb[DCB_ATTR_STATE])
740 if (!netdev->dcbnl_ops->setstate)
743 value = nla_get_u8(tb[DCB_ATTR_STATE]);
745 return nla_put_u8(skb, DCB_ATTR_STATE,
746 netdev->dcbnl_ops->setstate(netdev, value));
/* DCB_CMD_PFC_SCFG: apply the supplied per-user-priority PFC enables
 * through ->setpfccfg() and acknowledge with status 0. */
749 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
750 u32 seq, struct nlattr **tb, struct sk_buff *skb)
752 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
757 if (!tb[DCB_ATTR_PFC_CFG])
760 if (!netdev->dcbnl_ops->setpfccfg)
763 ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
764 tb[DCB_ATTR_PFC_CFG],
765 dcbnl_pfc_up_nest, NULL);
769 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
772 value = nla_get_u8(data[i]);
773 netdev->dcbnl_ops->setpfccfg(netdev,
774 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
777 return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
/* DCB_CMD_SET_ALL: commit all pending DCB changes via ->setall(); reply
 * carries the driver's return value and a CEE notification is sent. */
780 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
781 u32 seq, struct nlattr **tb, struct sk_buff *skb)
785 if (!tb[DCB_ATTR_SET_ALL])
788 if (!netdev->dcbnl_ops->setall)
791 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
792 netdev->dcbnl_ops->setall(netdev));
793 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
/* Common worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.
 * For each supplied per-TC nest, extracts the four optional parameters
 * (leaving absent ones as DCB_ATTR_VALUE_UNDEFINED) and calls the
 * direction-specific ->setpgtccfg*(); then applies any supplied
 * BW-group percentages via ->setpgbwgcfg*().  dir: Tx = 0, Rx = 1. */
798 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
799 u32 seq, struct nlattr **tb, struct sk_buff *skb,
802 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
803 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
811 if (!tb[DCB_ATTR_PG_CFG])
814 if (!netdev->dcbnl_ops->setpgtccfgtx ||
815 !netdev->dcbnl_ops->setpgtccfgrx ||
816 !netdev->dcbnl_ops->setpgbwgcfgtx ||
817 !netdev->dcbnl_ops->setpgbwgcfgrx)
820 ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
821 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
826 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
830 ret = nla_parse_nested_deprecated(param_tb,
831 DCB_TC_ATTR_PARAM_MAX,
833 dcbnl_tc_param_nest, NULL);
/* Absent parameters are passed down as UNDEFINED sentinels. */
837 pgid = DCB_ATTR_VALUE_UNDEFINED;
838 prio = DCB_ATTR_VALUE_UNDEFINED;
839 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
840 up_map = DCB_ATTR_VALUE_UNDEFINED;
842 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
844 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
846 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
847 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
849 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
850 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
852 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
854 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
856 /* dir: Tx = 0, Rx = 1 */
859 netdev->dcbnl_ops->setpgtccfgrx(netdev,
860 i - DCB_PG_ATTR_TC_0,
861 prio, pgid, tc_pct, up_map);
864 netdev->dcbnl_ops->setpgtccfgtx(netdev,
865 i - DCB_PG_ATTR_TC_0,
866 prio, pgid, tc_pct, up_map);
870 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
874 tc_pct = nla_get_u8(pg_tb[i]);
876 /* dir: Tx = 0, Rx = 1 */
879 netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
880 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
883 netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
884 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
888 return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
/* DCB_CMD_PGTX_SCFG: Tx direction (dir = 0) of the PG-config set. */
891 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
892 u32 seq, struct nlattr **tb, struct sk_buff *skb)
894 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
/* DCB_CMD_PGRX_SCFG: Rx direction (dir = 1) of the PG-config set. */
897 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
898 u32 seq, struct nlattr **tb, struct sk_buff *skb)
900 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
/* DCB_CMD_BCN_GCFG: dump the requested BCN settings (or all, when
 * DCB_BCN_ATTR_ALL is present): per-priority RP bytes via ->getbcnrp()
 * and the u32 configuration words via ->getbcncfg(). */
903 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
904 u32 seq, struct nlattr **tb, struct sk_buff *skb)
906 struct nlattr *bcn_nest;
907 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
914 if (!tb[DCB_ATTR_BCN])
917 if (!netdev->dcbnl_ops->getbcnrp ||
918 !netdev->dcbnl_ops->getbcncfg)
921 ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
922 tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
927 bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
931 if (bcn_tb[DCB_BCN_ATTR_ALL])
934 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
935 if (!getall && !bcn_tb[i])
938 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
940 ret = nla_put_u8(skb, i, value_byte);
945 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
946 if (!getall && !bcn_tb[i])
949 netdev->dcbnl_ops->getbcncfg(netdev, i,
951 ret = nla_put_u32(skb, i, value_integer);
956 nla_nest_end(skb, bcn_nest);
961 nla_nest_cancel(skb, bcn_nest);
965 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
966 u32 seq, struct nlattr **tb, struct sk_buff *skb)
968 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
974 if (!tb[DCB_ATTR_BCN])
977 if (!netdev->dcbnl_ops->setbcncfg ||
978 !netdev->dcbnl_ops->setbcnrp)
981 ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
982 tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
987 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
990 value_byte = nla_get_u8(data[i]);
991 netdev->dcbnl_ops->setbcnrp(netdev,
992 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
995 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
998 value_int = nla_get_u32(data[i]);
999 netdev->dcbnl_ops->setbcncfg(netdev,
1003 return nla_put_u8(skb, DCB_ATTR_BCN, 0);
/* Fetch the peer APP table from the driver (->peer_getappinfo() for the
 * count/info, ->peer_getapptable() for the entries, into a temporary
 * kmalloc'd array) and emit it as a nest of app_entry_type attributes,
 * optionally preceded by an app_info_type blob. */
1006 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1007 int app_nested_type, int app_info_type,
1010 struct dcb_peer_app_info info;
1011 struct dcb_app *table = NULL;
1012 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1018 * retrieve the peer app configuration form the driver. If the driver
1019 * handlers fail exit without doing anything
1021 err = ops->peer_getappinfo(netdev, &info, &app_count);
1022 if (!err && app_count) {
1023 table = kmalloc_array(app_count, sizeof(struct dcb_app),
1028 err = ops->peer_getapptable(netdev, table);
1036 * build the message, from here on the only possible failure
1037 * is due to the skb size
1041 app = nla_nest_start_noflag(skb, app_nested_type);
1043 goto nla_put_failure;
1045 if (app_info_type &&
1046 nla_put(skb, app_info_type, sizeof(info), &info))
1047 goto nla_put_failure;
1049 for (i = 0; i < app_count; i++) {
1050 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1052 goto nla_put_failure;
1054 nla_nest_end(skb, app);
/* Query the driver's app-trust order via ->dcbnl_getapptrust() into a
 * temporary selector array and emit it as a
 * DCB_ATTR_DCB_APP_TRUST_TABLE nest, one u8 attribute per selector
 * (attribute type derived from the selector value). */
1063 static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
1065 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1066 enum ieee_attrs_app type;
1067 struct nlattr *apptrust;
1068 int nselectors, err, i;
1071 selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
1075 err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
1081 apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
1087 for (i = 0; i < nselectors; i++) {
1088 type = dcbnl_app_attr_type_get(selectors[i]);
1089 err = nla_put_u8(skb, type, selectors[i]);
1091 nla_nest_cancel(skb, apptrust);
1095 nla_nest_end(skb, apptrust);
1102 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
/* Build the full DCB_ATTR_IEEE nest for a device: optional ETS, maxrate,
 * QCN (+stats), PFC and buffer structs from the driver hooks, the global
 * APP table entries matching this ifindex (under dcb_lock), the driver's
 * app-trust table, peer ETS/PFC/APP info when available, and finally the
 * DCBX version byte. */
1103 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1105 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1106 struct nlattr *ieee, *app;
1107 struct dcb_app_type *itr;
1111 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1114 ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
1118 if (ops->ieee_getets) {
1119 struct ieee_ets ets;
1120 memset(&ets, 0, sizeof(ets));
1121 err = ops->ieee_getets(netdev, &ets);
1123 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1127 if (ops->ieee_getmaxrate) {
1128 struct ieee_maxrate maxrate;
1129 memset(&maxrate, 0, sizeof(maxrate));
1130 err = ops->ieee_getmaxrate(netdev, &maxrate);
1132 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1133 sizeof(maxrate), &maxrate);
1139 if (ops->ieee_getqcn) {
1140 struct ieee_qcn qcn;
1142 memset(&qcn, 0, sizeof(qcn));
1143 err = ops->ieee_getqcn(netdev, &qcn);
1145 err = nla_put(skb, DCB_ATTR_IEEE_QCN,
1152 if (ops->ieee_getqcnstats) {
1153 struct ieee_qcn_stats qcn_stats;
1155 memset(&qcn_stats, 0, sizeof(qcn_stats));
1156 err = ops->ieee_getqcnstats(netdev, &qcn_stats);
1158 err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
1159 sizeof(qcn_stats), &qcn_stats);
1165 if (ops->ieee_getpfc) {
1166 struct ieee_pfc pfc;
1167 memset(&pfc, 0, sizeof(pfc));
1168 err = ops->ieee_getpfc(netdev, &pfc);
1170 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1174 if (ops->dcbnl_getbuffer) {
1175 struct dcbnl_buffer buffer;
1177 memset(&buffer, 0, sizeof(buffer));
1178 err = ops->dcbnl_getbuffer(netdev, &buffer);
1180 nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
1184 app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
/* The app list walk and the getdcbx read happen under dcb_lock. */
1188 spin_lock_bh(&dcb_lock);
1189 list_for_each_entry(itr, &dcb_app_list, list) {
1190 if (itr->ifindex == netdev->ifindex) {
1191 enum ieee_attrs_app type =
1192 dcbnl_app_attr_type_get(itr->app.selector);
1193 err = nla_put(skb, type, sizeof(itr->app), &itr->app);
1195 spin_unlock_bh(&dcb_lock);
1201 if (netdev->dcbnl_ops->getdcbx)
1202 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1206 spin_unlock_bh(&dcb_lock);
1207 nla_nest_end(skb, app);
1209 if (ops->dcbnl_getapptrust) {
1210 err = dcbnl_getapptrust(netdev, skb);
1215 /* get peer info if available */
1216 if (ops->ieee_peer_getets) {
1217 struct ieee_ets ets;
1218 memset(&ets, 0, sizeof(ets));
1219 err = ops->ieee_peer_getets(netdev, &ets);
1221 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1225 if (ops->ieee_peer_getpfc) {
1226 struct ieee_pfc pfc;
1227 memset(&pfc, 0, sizeof(pfc));
1228 err = ops->ieee_peer_getpfc(netdev, &pfc);
1230 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1234 if (ops->peer_getappinfo && ops->peer_getapptable) {
1235 err = dcbnl_build_peer_app(netdev, skb,
1236 DCB_ATTR_IEEE_PEER_APP,
1237 DCB_ATTR_IEEE_APP_UNSPEC,
1243 nla_nest_end(skb, ieee);
1245 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
/* Emit one CEE priority-group nest (DCB_ATTR_CEE_TX_PG or _RX_PG
 * depending on dir): a nested parameter block per traffic class from
 * ->getpgtccfg{tx,rx}() followed by the eight bandwidth-group
 * percentages from ->getpgbwgcfg{tx,rx}(). */
1253 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1256 u8 pgid, up_map, prio, tc_pct;
1257 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1258 int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1259 struct nlattr *pg = nla_nest_start_noflag(skb, i);
1264 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1265 struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
/* Defaults in case the driver leaves an output untouched. */
1270 pgid = DCB_ATTR_VALUE_UNDEFINED;
1271 prio = DCB_ATTR_VALUE_UNDEFINED;
1272 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1273 up_map = DCB_ATTR_VALUE_UNDEFINED;
1276 ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1277 &prio, &pgid, &tc_pct, &up_map);
1279 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1280 &prio, &pgid, &tc_pct, &up_map);
1282 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1283 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1284 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1285 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1287 nla_nest_end(skb, tc_nest);
1290 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1291 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1294 ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1297 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1299 if (nla_put_u8(skb, i, tc_pct))
1302 nla_nest_end(skb, pg);
/* Build the full DCB_ATTR_CEE nest for a device: Tx/Rx priority groups
 * (via dcbnl_cee_pg_fill), per-priority PFC config, the global APP table
 * entries matching this ifindex (under dcb_lock), the feature-config
 * flags, peer PG/PFC/APP info when available, and the DCBX version. */
1306 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1308 struct nlattr *cee, *app;
1309 struct dcb_app_type *itr;
1310 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1311 int dcbx, i, err = -EMSGSIZE;
1314 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1315 goto nla_put_failure;
1316 cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
1318 goto nla_put_failure;
1321 if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1322 err = dcbnl_cee_pg_fill(skb, netdev, 1);
1324 goto nla_put_failure;
1327 if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1328 err = dcbnl_cee_pg_fill(skb, netdev, 0);
1330 goto nla_put_failure;
1334 if (ops->getpfccfg) {
1335 struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
1339 goto nla_put_failure;
1341 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1342 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1343 if (nla_put_u8(skb, i, value))
1344 goto nla_put_failure;
1346 nla_nest_end(skb, pfc_nest);
/* The app list walk and the getdcbx read happen under dcb_lock. */
1350 spin_lock_bh(&dcb_lock);
1351 app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
1355 list_for_each_entry(itr, &dcb_app_list, list) {
1356 if (itr->ifindex == netdev->ifindex) {
1357 struct nlattr *app_nest = nla_nest_start_noflag(skb,
1362 err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1367 err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1372 err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1377 nla_nest_end(skb, app_nest);
1380 nla_nest_end(skb, app);
1382 if (netdev->dcbnl_ops->getdcbx)
1383 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1387 spin_unlock_bh(&dcb_lock);
1389 /* features flags */
1390 if (ops->getfeatcfg) {
1391 struct nlattr *feat = nla_nest_start_noflag(skb,
1394 goto nla_put_failure;
1396 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1398 if (!ops->getfeatcfg(netdev, i, &value) &&
1399 nla_put_u8(skb, i, value))
1400 goto nla_put_failure;
1402 nla_nest_end(skb, feat);
1405 /* peer info if available */
1406 if (ops->cee_peer_getpg) {
1408 memset(&pg, 0, sizeof(pg));
1409 err = ops->cee_peer_getpg(netdev, &pg);
1411 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1412 goto nla_put_failure;
1415 if (ops->cee_peer_getpfc) {
1417 memset(&pfc, 0, sizeof(pfc));
1418 err = ops->cee_peer_getpfc(netdev, &pfc);
1420 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1421 goto nla_put_failure;
1424 if (ops->peer_getappinfo && ops->peer_getapptable) {
1425 err = dcbnl_build_peer_app(netdev, skb,
1426 DCB_ATTR_CEE_PEER_APP_TABLE,
1427 DCB_ATTR_CEE_PEER_APP_INFO,
1428 DCB_ATTR_CEE_PEER_APP);
1430 goto nla_put_failure;
1432 nla_nest_end(skb, cee);
1436 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1438 goto nla_put_failure;
1443 spin_unlock_bh(&dcb_lock);
/* Build an event message (IEEE or CEE fill depending on dcbx_ver) and
 * broadcast it to RTNLGRP_DCB listeners; fill failures are reported to
 * the group via rtnl_set_sk_err(). */
1449 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1450 u32 seq, u32 portid, int dcbx_ver)
1452 struct net *net = dev_net(dev);
1453 struct sk_buff *skb;
1454 struct nlmsghdr *nlh;
1455 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1461 skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1465 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1466 err = dcbnl_ieee_fill(skb, dev);
1468 err = dcbnl_cee_fill(skb, dev);
1471 /* Report error to broadcast listeners */
1473 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1475 /* End nlmsg and notify broadcast listeners */
1476 nlmsg_end(skb, nlh);
1477 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
/* Exported helper: broadcast an IEEE-mode DCB notification. */
1483 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1484 u32 seq, u32 portid)
1486 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
1488 EXPORT_SYMBOL(dcbnl_ieee_notify);
1490 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1491 u32 seq, u32 portid)
1493 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
1495 EXPORT_SYMBOL(dcbnl_cee_notify);
1497 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
1498 * If any requested operation can not be completed
1499 * the entire msg is aborted and error value is returned.
1500 * No attempt is made to reconcile the case where only part of the
1501 * cmd can be completed.
1503 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1504 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1506 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1507 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1514 if (!tb[DCB_ATTR_IEEE])
1517 err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1519 dcbnl_ieee_policy, NULL);
1523 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1524 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1525 err = ops->ieee_setets(netdev, ets);
1530 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1531 struct ieee_maxrate *maxrate =
1532 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1533 err = ops->ieee_setmaxrate(netdev, maxrate);
1538 if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
1539 struct ieee_qcn *qcn =
1540 nla_data(ieee[DCB_ATTR_IEEE_QCN]);
1542 err = ops->ieee_setqcn(netdev, qcn);
1547 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1548 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1549 err = ops->ieee_setpfc(netdev, pfc);
1554 if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
1555 struct dcbnl_buffer *buffer =
1556 nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
1558 for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
1559 if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
1565 err = ops->dcbnl_setbuffer(netdev, buffer);
1570 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1571 struct nlattr *attr;
1574 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1575 enum ieee_attrs_app type = nla_type(attr);
1576 struct dcb_app *app_data;
1578 if (!dcbnl_app_attr_type_validate(type))
1581 if (nla_len(attr) < sizeof(struct dcb_app)) {
1586 app_data = nla_data(attr);
1588 if (!dcbnl_app_selector_validate(type,
1589 app_data->selector)) {
1594 if (ops->ieee_setapp)
1595 err = ops->ieee_setapp(netdev, app_data);
1597 err = dcb_ieee_setapp(netdev, app_data);
1603 if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) {
1604 u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0};
1605 struct nlattr *attr;
1609 if (!ops->dcbnl_setapptrust) {
1614 nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE],
1616 enum ieee_attrs_app type = nla_type(attr);
1620 if (!dcbnl_app_attr_type_validate(type) ||
1621 nla_len(attr) != 1 ||
1622 nselectors >= sizeof(selectors)) {
1627 selector = nla_get_u8(attr);
1629 if (!dcbnl_app_selector_validate(type, selector)) {
1634 /* Duplicate selector ? */
1635 for (i = 0; i < nselectors; i++) {
1636 if (selectors[i] == selector) {
1642 selectors[nselectors++] = selector;
1645 err = ops->dcbnl_setapptrust(netdev, selectors, nselectors);
1651 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1652 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1656 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1657 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1659 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1664 return dcbnl_ieee_fill(skb, netdev);
1667 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1668 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1670 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1671 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1677 if (!tb[DCB_ATTR_IEEE])
1680 err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1682 dcbnl_ieee_policy, NULL);
1686 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1687 struct nlattr *attr;
1690 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1691 enum ieee_attrs_app type = nla_type(attr);
1692 struct dcb_app *app_data;
1694 if (!dcbnl_app_attr_type_validate(type))
1697 app_data = nla_data(attr);
1699 if (!dcbnl_app_selector_validate(type,
1700 app_data->selector)) {
1705 if (ops->ieee_delapp)
1706 err = ops->ieee_delapp(netdev, app_data);
1708 err = dcb_ieee_delapp(netdev, app_data);
1715 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1716 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1721 /* DCBX configuration */
1722 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1723 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1725 if (!netdev->dcbnl_ops->getdcbx)
1728 return nla_put_u8(skb, DCB_ATTR_DCBX,
1729 netdev->dcbnl_ops->getdcbx(netdev));
1732 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1733 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1737 if (!netdev->dcbnl_ops->setdcbx)
1740 if (!tb[DCB_ATTR_DCBX])
1743 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1745 return nla_put_u8(skb, DCB_ATTR_DCBX,
1746 netdev->dcbnl_ops->setdcbx(netdev, value));
1749 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1750 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1752 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1757 if (!netdev->dcbnl_ops->getfeatcfg)
1760 if (!tb[DCB_ATTR_FEATCFG])
1763 ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1764 tb[DCB_ATTR_FEATCFG],
1765 dcbnl_featcfg_nest, NULL);
1769 nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
1773 if (data[DCB_FEATCFG_ATTR_ALL])
1776 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1777 if (!getall && !data[i])
1780 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1782 ret = nla_put_u8(skb, i, value);
1785 nla_nest_cancel(skb, nest);
1786 goto nla_put_failure;
1789 nla_nest_end(skb, nest);
1795 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1796 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1798 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1802 if (!netdev->dcbnl_ops->setfeatcfg)
1805 if (!tb[DCB_ATTR_FEATCFG])
1808 ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1809 tb[DCB_ATTR_FEATCFG],
1810 dcbnl_featcfg_nest, NULL);
1815 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1816 if (data[i] == NULL)
1819 value = nla_get_u8(data[i]);
1821 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1827 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1832 /* Handle CEE DCBX GET commands. */
1833 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1834 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1836 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1841 return dcbnl_cee_fill(skb, netdev);
1845 /* reply netlink message type */
1848 /* function to fill message contents */
1849 int (*cb)(struct net_device *, struct nlmsghdr *, u32,
1850 struct nlattr **, struct sk_buff *);
1853 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1854 [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
1855 [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
1856 [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
1857 [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
1858 [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
1859 [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
1860 [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
1861 [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
1862 [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
1863 [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
1864 [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
1865 [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
1866 [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
1867 [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
1868 [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
1869 [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
1870 [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
1871 [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
1872 [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
1873 [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
1874 [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
1875 [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
1876 [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
1877 [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
1878 [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
1879 [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
1880 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
1883 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1884 struct netlink_ext_ack *extack)
1886 struct net *net = sock_net(skb->sk);
1887 struct net_device *netdev;
1888 struct dcbmsg *dcb = nlmsg_data(nlh);
1889 struct nlattr *tb[DCB_ATTR_MAX + 1];
1890 u32 portid = NETLINK_CB(skb).portid;
1892 struct sk_buff *reply_skb;
1893 struct nlmsghdr *reply_nlh = NULL;
1894 const struct reply_func *fn;
1896 if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
1899 ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1900 dcbnl_rtnl_policy, extack);
1904 if (dcb->cmd > DCB_CMD_MAX)
1907 /* check if a reply function has been defined for the command */
1908 fn = &reply_funcs[dcb->cmd];
1911 if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
1914 if (!tb[DCB_ATTR_IFNAME])
1917 netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
1921 if (!netdev->dcbnl_ops)
1924 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
1925 nlh->nlmsg_flags, &reply_nlh);
1929 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
1931 nlmsg_free(reply_skb);
1935 nlmsg_end(reply_skb, reply_nlh);
1937 ret = rtnl_unicast(reply_skb, net, portid);
1942 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1943 int ifindex, int prio)
1945 struct dcb_app_type *itr;
1947 list_for_each_entry(itr, &dcb_app_list, list) {
1948 if (itr->app.selector == app->selector &&
1949 itr->app.protocol == app->protocol &&
1950 itr->ifindex == ifindex &&
1951 ((prio == -1) || itr->app.priority == prio))
1958 static int dcb_app_add(const struct dcb_app *app, int ifindex)
1960 struct dcb_app_type *entry;
1962 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1966 memcpy(&entry->app, app, sizeof(*app));
1967 entry->ifindex = ifindex;
1968 list_add(&entry->list, &dcb_app_list);
1974 * dcb_getapp - retrieve the DCBX application user priority
1975 * @dev: network interface
1976 * @app: application to get user priority of
1978 * On success returns a non-zero 802.1p user priority bitmap
1979 * otherwise returns 0 as the invalid user priority bitmap to
1980 * indicate an error.
1982 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1984 struct dcb_app_type *itr;
1987 spin_lock_bh(&dcb_lock);
1988 itr = dcb_app_lookup(app, dev->ifindex, -1);
1990 prio = itr->app.priority;
1991 spin_unlock_bh(&dcb_lock);
1995 EXPORT_SYMBOL(dcb_getapp);
1998 * dcb_setapp - add CEE dcb application data to app list
1999 * @dev: network interface
2000 * @new: application data to add
2002 * Priority 0 is an invalid priority in CEE spec. This routine
2003 * removes applications from the app list if the priority is
2004 * set to zero. Priority is expected to be 8-bit 802.1p user priority bitmap
2006 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2008 struct dcb_app_type *itr;
2009 struct dcb_app_type event;
2012 event.ifindex = dev->ifindex;
2013 memcpy(&event.app, new, sizeof(event.app));
2014 if (dev->dcbnl_ops->getdcbx)
2015 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2017 spin_lock_bh(&dcb_lock);
2018 /* Search for existing match and replace */
2019 itr = dcb_app_lookup(new, dev->ifindex, -1);
2022 itr->app.priority = new->priority;
2024 list_del(&itr->list);
2029 /* App type does not exist add new application type */
2031 err = dcb_app_add(new, dev->ifindex);
2033 spin_unlock_bh(&dcb_lock);
2035 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2038 EXPORT_SYMBOL(dcb_setapp);
2041 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
2042 * @dev: network interface
2043 * @app: where to store the retrieve application data
2045 * Helper routine which on success returns a non-zero 802.1Qaz user
2046 * priority bitmap otherwise returns 0 to indicate the dcb_app was
2047 * not found in APP list.
2049 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2051 struct dcb_app_type *itr;
2054 spin_lock_bh(&dcb_lock);
2055 itr = dcb_app_lookup(app, dev->ifindex, -1);
2057 prio |= 1 << itr->app.priority;
2058 spin_unlock_bh(&dcb_lock);
2062 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
2065 * dcb_ieee_setapp - add IEEE dcb application data to app list
2066 * @dev: network interface
2067 * @new: application data to add
2069 * This adds Application data to the list. Multiple application
2070 * entries may exists for the same selector and protocol as long
2071 * as the priorities are different. Priority is expected to be a
2072 * 3-bit unsigned integer
2074 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2076 struct dcb_app_type event;
2079 event.ifindex = dev->ifindex;
2080 memcpy(&event.app, new, sizeof(event.app));
2081 if (dev->dcbnl_ops->getdcbx)
2082 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2084 spin_lock_bh(&dcb_lock);
2085 /* Search for existing match and abort if found */
2086 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
2091 err = dcb_app_add(new, dev->ifindex);
2093 spin_unlock_bh(&dcb_lock);
2095 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2098 EXPORT_SYMBOL(dcb_ieee_setapp);
2101 * dcb_ieee_delapp - delete IEEE dcb application data from list
2102 * @dev: network interface
2103 * @del: application data to delete
2105 * This removes a matching APP data from the APP list
2107 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2109 struct dcb_app_type *itr;
2110 struct dcb_app_type event;
2113 event.ifindex = dev->ifindex;
2114 memcpy(&event.app, del, sizeof(event.app));
2115 if (dev->dcbnl_ops->getdcbx)
2116 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2118 spin_lock_bh(&dcb_lock);
2119 /* Search for existing match and remove it. */
2120 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
2121 list_del(&itr->list);
2126 spin_unlock_bh(&dcb_lock);
2128 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2131 EXPORT_SYMBOL(dcb_ieee_delapp);
2134 * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
2135 * priorities to the DSCP values assigned to that priority. Initialize p_map
2136 * such that each map element holds a bit mask of DSCP values configured for
2137 * that priority by APP entries.
2139 void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
2140 struct dcb_ieee_app_prio_map *p_map)
2142 int ifindex = dev->ifindex;
2143 struct dcb_app_type *itr;
2146 memset(p_map->map, 0, sizeof(p_map->map));
2148 spin_lock_bh(&dcb_lock);
2149 list_for_each_entry(itr, &dcb_app_list, list) {
2150 if (itr->ifindex == ifindex &&
2151 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2152 itr->app.protocol < 64 &&
2153 itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
2154 prio = itr->app.priority;
2155 p_map->map[prio] |= 1ULL << itr->app.protocol;
2158 spin_unlock_bh(&dcb_lock);
2160 EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
2163 * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
2164 * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
2165 * such that each map element holds a bit mask of priorities configured for a
2166 * given DSCP value by APP entries.
2169 dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
2170 struct dcb_ieee_app_dscp_map *p_map)
2172 int ifindex = dev->ifindex;
2173 struct dcb_app_type *itr;
2175 memset(p_map->map, 0, sizeof(p_map->map));
2177 spin_lock_bh(&dcb_lock);
2178 list_for_each_entry(itr, &dcb_app_list, list) {
2179 if (itr->ifindex == ifindex &&
2180 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2181 itr->app.protocol < 64 &&
2182 itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2183 p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
2185 spin_unlock_bh(&dcb_lock);
2187 EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
2190 * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
2191 * type, with valid PID values >= 1536. A special meaning is then assigned to
2192 * protocol value of 0: "default priority. For use when priority is not
2193 * otherwise specified".
2195 * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
2196 * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
2197 * priorities set by these entries.
2199 u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
2201 int ifindex = dev->ifindex;
2202 struct dcb_app_type *itr;
2205 spin_lock_bh(&dcb_lock);
2206 list_for_each_entry(itr, &dcb_app_list, list) {
2207 if (itr->ifindex == ifindex &&
2208 itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
2209 itr->app.protocol == 0 &&
2210 itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2211 mask |= 1 << itr->app.priority;
2213 spin_unlock_bh(&dcb_lock);
2217 EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
2219 static void dcbnl_flush_dev(struct net_device *dev)
2221 struct dcb_app_type *itr, *tmp;
2223 spin_lock_bh(&dcb_lock);
2225 list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
2226 if (itr->ifindex == dev->ifindex) {
2227 list_del(&itr->list);
2232 spin_unlock_bh(&dcb_lock);
2235 static int dcbnl_netdevice_event(struct notifier_block *nb,
2236 unsigned long event, void *ptr)
2238 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2241 case NETDEV_UNREGISTER:
2242 if (!dev->dcbnl_ops)
2245 dcbnl_flush_dev(dev);
2253 static struct notifier_block dcbnl_nb __read_mostly = {
2254 .notifier_call = dcbnl_netdevice_event,
2257 static int __init dcbnl_init(void)
2261 err = register_netdevice_notifier(&dcbnl_nb);
2265 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
2266 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
2270 device_initcall(dcbnl_init);