tap: Extending tap device create/destroy APIs
[linux-2.6-block.git] drivers/net/tap.c

#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	struct mutex minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;
		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	mutex_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		goto unlock;
	}

	mutex_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	mutex_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	mutex_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	mutex_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses tap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = tap_set_queue(tap, file, q);
	if (err)
		goto err_queue;

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_queue:
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;
	tap_put_queue(q);
	return 0;
}

static unsigned int tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (m && m->msg_control) {
		struct ubuf_info *uarg = m->msg_control;
		uarg->callback(uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = tap->dev->type;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(tap->dev, &sa);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

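As a hedged illustration only (nothing below is part of tap.c, and the /dev/tap5 node name is an assumption about how udev exposes a tap minor), a userspace caller might exercise a couple of the ioctls handled above like this:

/* Hypothetical userspace sketch; device node name is an assumption. */
#include <fcntl.h>
#include <linux/if_tun.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	unsigned int features;
	int fd = open("/dev/tap5", O_RDWR);

	if (fd < 0)
		return 1;

	/* TUNGETFEATURES reports IFF_TAP | IFF_NO_PI | TAP_IFFEATURES. */
	if (ioctl(fd, TUNGETFEATURES, &features) == 0)
		printf("features: %#x\n", features);

	/* TUNGETIFF fills in the name and flags of the underlying device. */
	memset(&ifr, 0, sizeof(ifr));
	if (ioctl(fd, TUNGETIFF, &ifr) == 0)
		printf("%s flags %#x\n", ifr.ifr_name, ifr.ifr_flags);

	close(fd);
	return 0;
}
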
#ifdef CONFIG_COMPAT
static long tap_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tap_compat_ioctl,
#endif
};

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;
	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);

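The comment above states the contract for tap_get_socket(). As a rough sketch only (the caller below is hypothetical and not taken from any in-tree user such as vhost-net), a consumer holding a tap file descriptor might look it up like this:

/* Hypothetical consumer of tap_get_socket(); the fd handling is an
 * assumption for illustration. */
static struct socket *example_get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);

	/* Only files opened through tap_fops qualify; the caller must keep
	 * the file reference for as long as it uses the socket. */
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}
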
int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct skb_array **arrays;
	int n = tap->numqueues;
	int ret, i = 0;

	arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		arrays[i++] = &q->skb_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	mutex_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev,
		    dev_t *tap_major, const char *device_name)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
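
The create/destroy API above is what this commit extends for consumers such as macvtap. A minimal sketch of a hypothetical consumer module follows (the "footap" names are invented for illustration); per-device minors would then come from tap_get_minor()/tap_free_minor() when the consumer registers and removes its net_device.

/* Hypothetical consumer module of tap_create_cdev()/tap_destroy_cdev();
 * names are illustrative, not taken from an in-tree driver. */
static struct cdev footap_cdev;
static dev_t footap_major;

static int __init footap_init(void)
{
	/* Reserves a char device region, wires it to tap_fops and records
	 * the major in tap.c's major_list so minors can be allocated. */
	return tap_create_cdev(&footap_cdev, &footap_major, "footap");
}

static void __exit footap_exit(void)
{
	/* Tears down the cdev and the per-major idr added by tap_list_add(). */
	tap_destroy_cdev(footap_major, &footap_cdev);
}

module_init(footap_init);
module_exit(footap_exit);
MODULE_LICENSE("GPL");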