Commit | Line | Data |
---|---|---|
c7d759eb JK |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | ||
3 | #include <linux/ethtool_netlink.h> | |
4 | #include <net/udp_tunnel.h> | |
966e5059 | 5 | #include <net/vxlan.h> |
c7d759eb JK |
6 | |
7 | #include "bitset.h" | |
8 | #include "common.h" | |
9 | #include "netlink.h" | |
10 | ||
/* TUNNEL_INFO request policy: only a device-identifying header nest is
 * accepted; an explicit UNSPEC attribute is rejected outright.
 */
static const struct nla_policy
ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = {
	[ETHTOOL_A_TUNNEL_INFO_UNSPEC]		= { .type = NLA_REJECT },
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		= { .type = NLA_NESTED },
};
16 | ||
/* The uAPI tunnel type values are the bit positions of the internal
 * UDP_TUNNEL_TYPE_* flag masks; keep the two enum spaces in sync at
 * compile time so ilog2() conversion below stays valid.
 */
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
21 | ||
966e5059 JK |
22 | static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact) |
23 | { | |
24 | ssize_t size; | |
25 | ||
26 | size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT, | |
27 | udp_tunnel_type_names, compact); | |
28 | if (size < 0) | |
29 | return size; | |
30 | ||
31 | return size + | |
32 | nla_total_size(0) + /* _UDP_TABLE */ | |
33 | nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ | |
34 | } | |
35 | ||
/* Compute the attribute payload size of a TUNNEL_INFO reply for
 * @req_base->dev (reply header not included).
 *
 * Returns the size, -EOPNOTSUPP if the device exposes no UDP tunnel
 * offload info, or a negative error from the bitset helper.
 *
 * NOTE(review): this accounting must stay in lockstep with what
 * ethnl_tunnel_info_fill_reply() emits, attribute for attribute.
 */
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		/* tables are populated front to back; the first empty one
		 * terminates the list
		 */
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		/* space for the currently-programmed port entries */
		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	/* devices with a hard-wired IANA VXLAN port get one extra
	 * pseudo-table carrying that single static entry
	 */
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
81 | ||
82 | static int | |
83 | ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, | |
84 | struct sk_buff *skb) | |
85 | { | |
86 | bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; | |
87 | const struct udp_tunnel_nic_info *info; | |
966e5059 | 88 | struct nlattr *ports, *table, *entry; |
c7d759eb JK |
89 | unsigned int i; |
90 | ||
91 | info = req_base->dev->udp_tunnel_nic_info; | |
92 | if (!info) | |
93 | return -EOPNOTSUPP; | |
94 | ||
95 | ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS); | |
96 | if (!ports) | |
97 | return -EMSGSIZE; | |
98 | ||
99 | for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { | |
100 | if (!info->tables[i].n_entries) | |
101 | break; | |
102 | ||
103 | table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); | |
104 | if (!table) | |
105 | goto err_cancel_ports; | |
106 | ||
107 | if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, | |
108 | info->tables[i].n_entries)) | |
109 | goto err_cancel_table; | |
110 | ||
111 | if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, | |
112 | &info->tables[i].tunnel_types, NULL, | |
113 | __ETHTOOL_UDP_TUNNEL_TYPE_CNT, | |
114 | udp_tunnel_type_names, compact)) | |
115 | goto err_cancel_table; | |
116 | ||
117 | if (udp_tunnel_nic_dump_write(req_base->dev, i, skb)) | |
118 | goto err_cancel_table; | |
119 | ||
120 | nla_nest_end(skb, table); | |
121 | } | |
122 | ||
966e5059 JK |
123 | if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { |
124 | u32 zero = 0; | |
125 | ||
126 | table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); | |
127 | if (!table) | |
128 | goto err_cancel_ports; | |
129 | ||
130 | if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1)) | |
131 | goto err_cancel_table; | |
132 | ||
133 | if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, | |
134 | &zero, NULL, | |
135 | __ETHTOOL_UDP_TUNNEL_TYPE_CNT, | |
136 | udp_tunnel_type_names, compact)) | |
137 | goto err_cancel_table; | |
138 | ||
139 | entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY); | |
140 | ||
141 | if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, | |
142 | htons(IANA_VXLAN_UDP_PORT)) || | |
143 | nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, | |
144 | ilog2(UDP_TUNNEL_TYPE_VXLAN))) | |
145 | goto err_cancel_entry; | |
146 | ||
147 | nla_nest_end(skb, entry); | |
148 | nla_nest_end(skb, table); | |
149 | } | |
150 | ||
c7d759eb JK |
151 | nla_nest_end(skb, ports); |
152 | ||
153 | return 0; | |
154 | ||
966e5059 JK |
155 | err_cancel_entry: |
156 | nla_nest_cancel(skb, entry); | |
c7d759eb JK |
157 | err_cancel_table: |
158 | nla_nest_cancel(skb, table); | |
159 | err_cancel_ports: | |
160 | nla_nest_cancel(skb, ports); | |
161 | return -EMSGSIZE; | |
162 | } | |
163 | ||
164 | static int | |
165 | ethnl_tunnel_info_req_parse(struct ethnl_req_info *req_info, | |
166 | const struct nlmsghdr *nlhdr, struct net *net, | |
167 | struct netlink_ext_ack *extack, bool require_dev) | |
168 | { | |
169 | struct nlattr *tb[ETHTOOL_A_TUNNEL_INFO_MAX + 1]; | |
170 | int ret; | |
171 | ||
172 | ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_TUNNEL_INFO_MAX, | |
173 | ethtool_tunnel_info_policy, extack); | |
174 | if (ret < 0) | |
175 | return ret; | |
176 | ||
177 | return ethnl_parse_header_dev_get(req_info, | |
178 | tb[ETHTOOL_A_TUNNEL_INFO_HEADER], | |
179 | net, extack, require_dev); | |
180 | } | |
181 | ||
/* Handle a TUNNEL_INFO GET request for a single device.
 *
 * Parsing with require_dev == true takes a reference on the target device;
 * it is dropped on every exit path.  The RTNL lock is held across the size
 * computation and the fill so the device's tunnel state cannot change
 * between the two (a mismatch would overflow the allocated message).
 */
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_tunnel_info_req_parse(&req_info, info->nlhdr,
					  genl_info_net(info), info->extack,
					  true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	dev_put(req_info.dev);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	dev_put(req_info.dev);
	return ret;
}
227 | ||
/* Per-dump state stored in netlink_callback::ctx; resume position across
 * dump invocations.
 */
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;	/* parsed request (dev unused) */
	int			pos_hash;	/* dev_index_head bucket to resume at */
	int			pos_idx;	/* entry index within that bucket */
};
233 | ||
234 | int ethnl_tunnel_info_start(struct netlink_callback *cb) | |
235 | { | |
236 | struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; | |
237 | int ret; | |
238 | ||
239 | BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); | |
240 | ||
241 | memset(ctx, 0, sizeof(*ctx)); | |
242 | ||
243 | ret = ethnl_tunnel_info_req_parse(&ctx->req_info, cb->nlh, | |
244 | sock_net(cb->skb->sk), cb->extack, | |
245 | false); | |
246 | if (ctx->req_info.dev) { | |
247 | dev_put(ctx->req_info.dev); | |
248 | ctx->req_info.dev = NULL; | |
249 | } | |
250 | ||
251 | return ret; | |
252 | } | |
253 | ||
/* Dump callback: emit one TUNNEL_INFO message per device, walking the
 * per-netns device index hash table and resuming from the (bucket, index)
 * position saved in the dump context.
 *
 * Devices without tunnel offload info (-EOPNOTSUPP from the fill) are
 * silently skipped.  cb->seq is set to dev_base_seq so the consistency
 * check below can flag the dump as inconsistent if the device list
 * changed mid-dump.
 */
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	int s_idx = ctx->pos_idx;
	int h, idx = 0;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	cb->seq = net->dev_base_seq;
	/* s_idx only applies to the first (resumed) bucket, hence the reset
	 * in the loop increment
	 */
	for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		struct hlist_head *head;
		struct net_device *dev;

		head = &net->dev_index_head[h];
		idx = 0;
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;

			ehdr = ethnl_dump_put(skb, cb,
					      ETHTOOL_MSG_TUNNEL_INFO_GET);
			if (!ehdr) {
				ret = -EMSGSIZE;
				goto out;
			}

			ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				goto out;
			}

			/* fill reads the device via req_info; clear it again
			 * so no stale pointer survives in the dump context
			 */
			ctx->req_info.dev = dev;
			ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
			ctx->req_info.dev = NULL;
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				if (ret == -EOPNOTSUPP)
					goto cont;
				goto out;
			}
			genlmsg_end(skb, ehdr);
cont:
			idx++;
		}
	}
out:
	rtnl_unlock();

	/* save resume position for the next invocation */
	ctx->pos_hash = h;
	ctx->pos_idx = idx;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	/* partial success: hand back what fits, dump continues later */
	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}