/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

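/* Test bit number 'nr' in a mask laid out as an array of __u32 words.
 * A local helper is used (rather than test_bit()) since the filter
 * masks below are plain __u32 arrays, matching struct hci_ufilter.
 */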
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

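/* Restrictions applied to raw sockets without CAP_NET_RAW: the bitmaps
 * below describe which packet types and HCI events such a socket may
 * receive (see hci_sock_setsockopt()) and which commands, grouped by
 * OGF, it may send (see hci_sock_sendmsg()).
 */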
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

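/* Return true if the per-socket receive filter rejects this packet,
 * i.e. the frame must not be queued to sk.
 */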
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

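/* Build an HCI_MON_NEW_INDEX or HCI_MON_DEL_INDEX packet for a
 * controller register/unregister event. Returns NULL for any other
 * event or if allocation fails.
 */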
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

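/* Replay a NEW_INDEX event for every registered controller so that a
 * freshly bound monitor socket learns about the devices that already
 * exist.
 */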
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

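/* Notify userspace about a device event: forward it to the monitor
 * channel (when someone is listening) and as an HCI_EV_SI_DEVICE stack
 * event to raw sockets. On HCI_DEV_UNREG, also detach every socket
 * that was bound to the disappearing controller.
 */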
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

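/* Look up a registered management channel by number. The __ variant
 * expects mgmt_chan_list_lock to be held by the caller.
 */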
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			hci_dev_close(hdev->id);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

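/* Helpers for the HCIBLOCKADDR/HCIUNBLOCKADDR ioctls: add or remove a
 * BR/EDR address on the controller's blacklist.
 */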
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

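/* Top-level ioctl handler, valid only on the raw channel: device list,
 * info and up/down/reset style commands work on any raw HCI socket,
 * everything else is passed on to hci_sock_bound_ioctl() and requires
 * the socket to be bound to a controller.
 */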
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

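/* bind() selects the channel for this socket: raw sockets may attach
 * to a specific controller, user channel sockets must claim one
 * exclusively, while the monitor and management channels bind without
 * a device. Trust and default event flags are set here as well.
 */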
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and the HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

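/* getname() reports the index of the bound controller and the channel;
 * there is no peer address for HCI sockets.
 */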
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

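/* Attach the ancillary data requested via the socket's cmsg mask
 * (packet direction and/or receive timestamp) to a message delivered
 * on a raw HCI socket.
 */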
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

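/* Parse a single management command from a sendmsg() buffer: validate
 * the header, opcode, permissions, controller index and payload
 * length, then dispatch to the handler registered for this channel.
 */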
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

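/* sendmsg() for HCI sockets. Raw and user channel frames carry the
 * packet type in their first byte; management channels hand the whole
 * message to hci_mgmt_cmd(). Raw sockets without CAP_NET_RAW may only
 * send commands allowed by hci_sec_filter.
 */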
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

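/* setsockopt(), only valid on the raw channel: HCI_DATA_DIR and
 * HCI_TIME_STAMP toggle ancillary data, HCI_FILTER installs a receive
 * filter which is capped by hci_sec_filter for sockets without
 * CAP_NET_RAW.
 */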
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

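/* Backend for socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI); only raw
 * sockets are supported on this protocol.
 */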
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}