drivers/net/tap.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

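/*
 * Queue bookkeeping (added note): tap->numqueues counts every queue
 * attached to the device, while tap->numvtaps counts only the enabled
 * ones that take part in receive-side steering via the taps[] array.
 */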
static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;
		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

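/*
 * rx handler installed on the lower device (added note): pick a queue for
 * the skb, segment it in software when the offloads accepted by userspace
 * do not cover it, queue it on that queue's ptr_ring and wake up readers.
 */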
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;
	enum skb_drop_reason drop_reason;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
		struct sk_buff *next;

		if (IS_ERR(segs)) {
			drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
			goto drop;
		}

		if (!segs) {
			if (ptr_ring_produce(&q->ring, skb)) {
				drop_reason = SKB_DROP_REASON_FULL_RING;
				goto drop;
			}
			goto wake_up;
		}

		consume_skb(skb);
		skb_list_walk_safe(segs, skb, next) {
			skb_mark_not_on_list(skb);
			if (ptr_ring_produce(&q->ring, skb)) {
				drop_reason = SKB_DROP_REASON_FULL_RING;
				kfree_skb_reason(skb, drop_reason);
				kfree_skb_list_reason(next, drop_reason);
				break;
			}
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb)) {
			drop_reason = SKB_DROP_REASON_SKB_CSUM;
			goto drop;
		}
		if (ptr_ring_produce(&q->ring, skb)) {
			drop_reason = SKB_DROP_REASON_FULL_RING;
			goto drop;
		}
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb_reason(skb, drop_reason);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);

static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}

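/*
 * open() on the tap character device (added note): look up the tap device
 * by the inode's major/minor, allocate a tap_queue backed by a socket and
 * a ptr_ring sized to the device's tx_queue_len, and attach it as a queue.
 */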
static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses tap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	/* tap groks IOCB_NOWAIT just fine, mark it as such */
	file->f_mode |= FMODE_NOWAIT;

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;
	tap_put_queue(q);
	return 0;
}

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

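/*
 * Added note: this path builds an skb from a userspace write (or sendmsg),
 * optionally parsing the leading virtio_net header, choosing between a
 * zerocopy and a copying path, and transmitting via dev_queue_xmit().
 */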
/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;
	enum skb_drop_reason drop_reason;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				tap16_to_cpu(q, vnet_hdr.csum_start) +
				tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto err_kfree;
	}

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (!tap) {
		kfree_skb(skb);
		rcu_read_unlock();
		return total_len;
	}
	skb->dev = tap->dev;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err) {
			rcu_read_unlock();
			drop_reason = SKB_DROP_REASON_DEV_HDR;
			goto err_kfree;
		}
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(NULL, uarg, false);
	}

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return total_len;

err_kfree:
	kfree_skb_reason(skb, drop_reason);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	int noblock = 0;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	return tap_get_user(q, NULL, from, noblock);
}

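/*
 * Added note: copy one queued skb to the userspace buffer, optionally
 * prepending the virtio_net header and re-inserting a VLAN header when
 * the tag was stored out of band in the skb.
 */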
/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true,
					    vlan_hlen))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

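/*
 * Added note: consume one skb for a read. If the caller already holds an
 * skb (recvmsg passed it through msg_control), use it directly; otherwise
 * pop from the ring, sleeping unless noblock is set.
 */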
static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;
	int noblock = 0;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	ret = tap_do_read(q, to, noblock, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

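/*
 * TUNSETOFFLOAD helper (added note): translate the offload flags userspace
 * is willing to accept into a feature mask for the tap device, toggle the
 * RX_OFFLOADS (GRO/LRO) bits to match, and let the driver apply them via
 * the update_features callback.
 */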
static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		/* TODO: for now USO4 and USO6 should work simultaneously */
		if ((arg & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6))
			features |= NETIF_F_GSO_UDP_L4;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6) ||
	    (feature_mask & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO |
			    TUN_F_USO4 | TUN_F_USO6))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

static const struct file_operations tap_fops = {
	.owner = THIS_MODULE,
	.open = tap_open,
	.release = tap_release,
	.read_iter = tap_read_iter,
	.write_iter = tap_write_iter,
	.poll = tap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = tap_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

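/*
 * Added note: build and transmit an skb from an XDP buffer batched in
 * through sendmsg(TUN_MSG_PTR) (typically by vhost-net); the buffer
 * already carries a tun_xdp_hdr with the virtio_net header and length.
 */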
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}

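/*
 * Added note: the sendmsg()/recvmsg() handlers below back the socket
 * interface exposed by tap_get_socket(); TUN_MSG_PTR control messages
 * carry a batch of XDP buffers, anything else goes through tap_get_user().
 */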
static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;
	int i;

	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
	    ctl && ctl->type == TUN_MSG_PTR) {
		for (i = 0; i < ctl->num; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tap_get_user_xdp(q, xdp);
		}
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;
	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);

struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

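/*
 * Added note: resize every attached queue's ptr_ring to the device's
 * current tx_queue_len, e.g. after the lower device's queue length changes.
 */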
int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);

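/*
 * Added note: each driver built on top of tap (e.g. macvtap) registers its
 * own character device major; major_list maps a major number back to its
 * idr of minors so the right tap_dev can be found at open() time.
 */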
static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");