/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
	__u16             mtu;
};
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
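/* Worked example of the indexing above: testing bit 33 reads __u32 word
 * 33 >> 5 = 1 of the mask and checks bit 33 & 31 = 1 within that word, so
 * the mask is treated as a flat array of __u32 words.
 */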
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
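/* Reading the table above, as a sanity check: type_mask 0x10 admits only
 * HCI_EVENT_PKT (packet type 0x04, i.e. bit 4). For commands, HCI_OP_INQUIRY
 * (OGF 0x01, OCF 0x0001) maps to bit 1 of ocf_mask[1][0] = 0xbe000006 and is
 * therefore allowed without CAP_NET_RAW, while HCI_OP_PERIODIC_INQ
 * (OCF 0x0003, bit 3) is not set there and requires the capability; see the
 * enforcement in hci_sock_sendmsg().
 */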
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials are set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials are set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}
static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
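/* The resulting HCI_MON_CTRL_OPEN payload, as built above:
 *
 *	cookie (LE32) | format (LE16) | version (3 bytes) | flags (LE32) |
 *	comm length (1 byte, TASK_COMM_LEN) | comm (TASK_COMM_LEN bytes)
 *
 * which accounts for the 14 + TASK_COMM_LEN allocation.
 */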
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered, since all the cleanup will have
			 * already been completed and hdev will get released
			 * when we put it below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
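/* Userspace sketch (illustrative, error handling omitted): enumerating
 * controllers with HCIGETDEVLIST via the hci_dev_list_req layout from
 * hci_sock.h; HCI_MAX_DEV is used here only as a generous upper bound:
 *
 *	struct hci_dev_list_req *dl;
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(fd, HCIGETDEVLIST, dl) == 0)
 *		;	(dl->dev_num now holds the number of entries filled in)
 */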
#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EOPNOTSUPP;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name, etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
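/* Userspace sketch (illustrative): a monitor client binds the way btmon
 * does; per the code above this requires CAP_NET_RAW and triggers a replay
 * of all existing indexes and control sockets:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */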
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree_skb(skb);
	return err;
}
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
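/* Worked example of a frame the validation above accepts (ident "foo",
 * message "hi", priority 6). Note that ident_len counts the ident string
 * including its NUL terminator:
 *
 *	hci_mon_hdr:	opcode = 0x0000 (LE16, rewritten to
 *			HCI_MON_USER_LOGGING on the way out),
 *			index (LE16), len = 9 (LE16)
 *	payload:	06 04 'f' 'o' 'o' 00 'h' 'i' 00
 */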
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
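/* Userspace sketch (illustrative): raising the receive MTU on a management
 * channel socket; per the switch above, HCI_CHANNEL_RAW and HCI_CHANNEL_USER
 * sockets reject this option:
 *
 *	uint16_t mtu = 1024;
 *
 *	setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
 */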
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
			   kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}