// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
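
/* A worked example of the timing logic above, with illustrative figures: if
 * repeat == 1000 and time_spent accumulates to 5,000,000 ns across the loop
 * (time spent in cond_resched() is excluded by restarting time_start), then
 * do_div() leaves time_spent == 5000, so user space sees an average of
 * 5 us per program run, clamped to U32_MAX.
 */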

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
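
/* A minimal user-space sketch of the contract bpf_test_finish() implements.
 * The bpf_attr field names are real; prog_fd and the buffers are assumed to
 * exist in the caller:
 *
 *	union bpf_attr attr = {};
 *	unsigned char in[64] = { 0 }, out[256];
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (unsigned long)in;
 *	attr.test.data_size_in = sizeof(in);
 *	attr.test.data_out = (unsigned long)out;
 *	attr.test.data_size_out = sizeof(out);	// size hint; -ENOSPC if too small
 *	attr.test.repeat = 1000;
 *
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success, attr.test.retval, attr.test.duration (avg ns per run)
 *	// and attr.test.data_size_out (full output size) were written back
 */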

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. Seven or more arguments can be
 * supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
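
/* The retval written back above packs two 16-bit values: the low half is
 * bpf_modify_return_test()'s return value (possibly overridden by an attached
 * fmod_ret program) and the high half is 1 if the function body actually ran
 * and incremented *b. A sketch of the user-space decode; the bpf_attr field
 * name is real, the locals are illustrative:
 *
 *	u32 retval = attr.test.retval;
 *	u16 ret = retval & 0xffff;
 *	u16 side_effect = retval >> 16;	// 0 means the body was skipped
 */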

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
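
/* A minimal sketch of feeding a context through these helpers from user
 * space, for the skb runner below. The bpf_attr and __sk_buff field names
 * are real; the rest is illustrative. Unused __sk_buff fields must stay
 * zero, or convert___skb_to_skb() rejects the run with -EINVAL:
 *
 *	struct __sk_buff ctx = {};
 *
 *	ctx.mark = 42;				// one of the allowed fields
 *	attr.test.ctx_in = (unsigned long)&ctx;
 *	attr.test.ctx_size_in = sizeof(ctx);
 *	attr.test.ctx_out = (unsigned long)&ctx;
 *	attr.test.ctx_size_out = sizeof(ctx);
 */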

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
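
/* For instance, with u8 buf[4] = { 0, 0, 5, 0 }, range_is_zero(buf, 0, 2) is
 * true while range_is_zero(buf, 0, 4) is false: memchr_inv() returns a
 * pointer to the first byte that differs from the given value, or NULL if
 * every byte matches.
 */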

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
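
/* A hedged sketch of a program this path exercises: a minimal
 * BPF_PROG_TYPE_SCHED_CLS classifier, as built with clang -target bpf and
 * libbpf's bpf_helpers.h. The section name and body are chosen for
 * illustration only:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("classifier")
 *	int test_cls(struct __sk_buff *skb)
 *	{
 *		// direct packet access works here, see is_direct_pkt_access
 *		void *data = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end;
 *
 *		if (data + 14 > data_end)	// Ethernet header bounds check
 *			return 0;		// TC_ACT_OK
 *		return 0;
 *	}
 *	char _license[] SEC("license") = "GPL";
 */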

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}
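
/* The size recomputation above is what lets an XDP program resize the test
 * packet, e.g. via bpf_xdp_adjust_tail(). A hedged sketch of such a program;
 * support for growing (positive delta) depends on the kernel version, and on
 * kernels that allow it the grown tail memory is cleared:
 *
 *	SEC("xdp")
 *	int test_xdp(struct xdp_md *xdp)
 *	{
 *		// shrink the packet by four bytes at the tail
 *		if (bpf_xdp_adjust_tail(xdp, -4))
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */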

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
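
/* Unlike the skb and xdp runners, the output copied back through
 * bpf_test_finish() here is not the packet but the dissected
 * struct bpf_flow_keys. A hedged user-space sketch; the bpf_attr field names
 * are real, flow_prog_fd and the packet buffer are assumed to exist:
 *
 *	struct bpf_flow_keys keys = {};
 *
 *	attr.test.prog_fd = flow_prog_fd;
 *	attr.test.data_in = (unsigned long)packet;	// raw frame, >= ETH_HLEN
 *	attr.test.data_size_in = packet_len;
 *	attr.test.data_out = (unsigned long)&keys;
 *	attr.test.data_size_out = sizeof(keys);
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 */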