2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* NOTE(review): excerpt with original line numbers baked in and lines
 * elided; code left byte-identical, comments only added. */
/* Locally supported L2CAP feature mask (fixed channels advertised). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap sent in info responses; 0x02 presumably marks the
 * signalling channel — TODO confirm against the info-rsp handler. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue used by the ERTM local-busy handling (see l2cap_busy_work). */
63 static struct workqueue_struct *_busy_wq;
/* Global list of L2CAP sockets, guarded by its rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
55 77 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for a channel by destination CID.
 * Caller must hold conn->chan_lock (double-underscore convention).
 * NOTE(review): body lines are elided in this excerpt. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk, keyed on the source CID; lock held by caller. */
90 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
101 /* Find channel with given SCID.
102 * Returns locked socket */
/* Locked wrapper: takes chan_lock for reading around the SCID walk.
 * The socket-locking step itself is in elided lines — TODO confirm. */
103 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 read_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 read_unlock(&conn->chan_lock);
/* Lookup by pending signalling command identifier (chan->ident). */
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
117 struct l2cap_chan *c;
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)
/* Locked wrapper around the ident walk, mirroring the SCID variant. */
126 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c;
130 read_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_ident(conn, ident);
134 read_unlock(&conn->chan_lock);
/* Find a bound socket matching source PSM (chan->sport) and source
 * bdaddr. Caller is expected to hold l2cap_sk_list.lock — the users
 * below take it before calling. NOTE(review): lines elided. */
138 struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
141 struct hlist_node *node;
142 sk_for_each(sk, node, &l2cap_sk_list.head) {
143 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
145 if (chan->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Register a PSM for a channel; fails (elided path) if the PSM/addr
 * pair is already taken. Write-locks the global socket list. */
154 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
156 write_lock_bh(&l2cap_sk_list.lock);
158 if (__l2cap_get_sock_by_addr(psm, src)) {
159 write_unlock_bh(&l2cap_sk_list.lock);
166 write_unlock_bh(&l2cap_sk_list.lock);
/* Record a fixed SCID for the channel under the same list lock;
 * the assignment itself is in elided lines — TODO confirm. */
171 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
173 write_lock_bh(&l2cap_sk_list.lock);
177 write_unlock_bh(&l2cap_sk_list.lock);
/* Linear scan of the dynamic CID range for the first CID not already
 * used as a source CID on this connection. */
182 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
184 u16 cid = L2CAP_CID_DYN_START;
186 for (; cid < L2CAP_CID_DYN_END; cid++) {
187 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Allocate a zeroed channel object bound to sk (atomic context —
 * GFP_ATOMIC). Error handling and back-pointer setup are elided. */
194 struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
196 struct l2cap_chan *chan;
198 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Release a channel object; body elided (presumably kfree). */
207 void l2cap_chan_free(struct l2cap_chan *chan)
/* Attach chan to conn and pick CIDs by socket type:
 *  SEQPACKET/STREAM: LE data CIDs on LE links, else a fresh dynamic CID;
 *  DGRAM: connectionless CID; otherwise (raw): signalling CID.
 * Caller holds conn->chan_lock for writing (see l2cap_chan_add). */
212 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
214 struct sock *sk = chan->sk;
216 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
217 chan->psm, chan->dcid);
/* 0x13 = "remote user terminated" default disconnect reason. */
219 conn->disc_reason = 0x13;
223 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
224 if (conn->hcon->type == LE_LINK) {
226 chan->omtu = L2CAP_LE_DEFAULT_MTU;
227 chan->scid = L2CAP_CID_LE_DATA;
228 chan->dcid = L2CAP_CID_LE_DATA;
230 /* Alloc CID for connection-oriented socket */
231 chan->scid = l2cap_alloc_cid(conn);
232 chan->omtu = L2CAP_DEFAULT_MTU;
234 } else if (sk->sk_type == SOCK_DGRAM) {
235 /* Connectionless socket */
236 chan->scid = L2CAP_CID_CONN_LESS;
237 chan->dcid = L2CAP_CID_CONN_LESS;
238 chan->omtu = L2CAP_DEFAULT_MTU;
240 /* Raw socket can send/recv signalling messages only */
241 chan->scid = L2CAP_CID_SIGNALING;
242 chan->dcid = L2CAP_CID_SIGNALING;
243 chan->omtu = L2CAP_DEFAULT_MTU;
248 list_add(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and tear down ERTM state.
 * Marks the socket closed/zapped, unlinks from an accept queue if it
 * was a pending child, and (when ERTM) stops timers and purges the
 * srej/busy queues. NOTE(review): several guard branches elided. */
252 * Must be called on the locked socket. */
253 void l2cap_chan_del(struct l2cap_chan *chan, int err)
255 struct sock *sk = chan->sk;
256 struct l2cap_conn *conn = chan->conn;
257 struct sock *parent = bt_sk(sk)->parent;
259 l2cap_sock_clear_timer(sk);
261 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
264 /* Delete from channel list */
265 write_lock_bh(&conn->chan_lock);
266 list_del(&chan->list);
267 write_unlock_bh(&conn->chan_lock);
/* Drop the reference taken when the channel was attached. */
271 hci_conn_put(conn->hcon);
274 sk->sk_state = BT_CLOSED;
275 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: remove from accept queue and wake
 * the listener; otherwise just signal the state change. */
281 bt_accept_unlink(sk);
282 parent->sk_data_ready(parent, 0);
284 sk->sk_state_change(sk);
/* Skip queue teardown unless configuration completed both ways —
 * the skipped-to label is in elided lines; TODO confirm. */
286 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
287 chan->conf_state & L2CAP_CONF_INPUT_DONE))
290 skb_queue_purge(&chan->tx_q);
292 if (chan->mode == L2CAP_MODE_ERTM) {
293 struct srej_list *l, *tmp;
/* Stop all three ERTM timers before freeing their state. */
295 del_timer(&chan->retrans_timer);
296 del_timer(&chan->monitor_timer);
297 del_timer(&chan->ack_timer);
299 skb_queue_purge(&chan->srej_q);
300 skb_queue_purge(&chan->busy_q);
/* Free each pending SREJ entry (freeing body elided). */
302 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Map channel security level to an HCI authentication type.
 * Raw sockets use dedicated bonding; PSM 0x0001 (SDP) is special-cased
 * to never require bonding and is downgraded from LOW to SDP level;
 * everything else uses general bonding. */
309 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
311 struct sock *sk = chan->sk;
313 if (sk->sk_type == SOCK_RAW) {
314 switch (chan->sec_level) {
315 case BT_SECURITY_HIGH:
316 return HCI_AT_DEDICATED_BONDING_MITM;
317 case BT_SECURITY_MEDIUM:
318 return HCI_AT_DEDICATED_BONDING;
320 return HCI_AT_NO_BONDING;
322 } else if (chan->psm == cpu_to_le16(0x0001)) {
323 if (chan->sec_level == BT_SECURITY_LOW)
324 chan->sec_level = BT_SECURITY_SDP;
326 if (chan->sec_level == BT_SECURITY_HIGH)
327 return HCI_AT_NO_BONDING_MITM;
329 return HCI_AT_NO_BONDING;
331 switch (chan->sec_level) {
332 case BT_SECURITY_HIGH:
333 return HCI_AT_GENERAL_BONDING_MITM;
334 case BT_SECURITY_MEDIUM:
335 return HCI_AT_GENERAL_BONDING;
337 return HCI_AT_NO_BONDING;
342 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL link. */
343 static inline int l2cap_check_security(struct l2cap_chan *chan)
345 struct l2cap_conn *conn = chan->conn;
348 auth_type = l2cap_get_auth_type(chan);
350 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier under conn->lock;
 * wraps within the kernel-reserved 1..128 range (wrap line elided). */
353 u8 l2cap_get_ident(struct l2cap_conn *conn)
357 /* Get next available identificator.
358 * 1 - 128 are used by kernel.
359 * 129 - 199 are reserved.
360 * 200 - 254 are used by utilities like l2ping, etc.
363 spin_lock_bh(&conn->lock);
365 if (++conn->tx_ident > 128)
370 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it over the ACL link,
 * requesting no-flush when the controller supports it. */
375 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
377 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
380 BT_DBG("code 0x%2.2x", code);
385 if (lmp_no_flush_capable(conn->hcon->hdev))
386 flags = ACL_START_NO_FLUSH;
390 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory (S) frame carrying `control`.
 * Consumes the pending F- and P-bit requests, appends FCS when the
 * channel negotiated CRC16, and refuses to send unless connected. */
393 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
396 struct l2cap_hdr *lh;
397 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
398 struct l2cap_conn *conn = chan->conn;
399 struct sock *sk = (struct sock *)pi;
400 int count, hlen = L2CAP_HDR_SIZE + 2;
403 if (sk->sk_state != BT_CONNECTED)
/* FCS adds 2 bytes to hlen (increment line elided). */
406 if (chan->fcs == L2CAP_FCS_CRC16)
409 BT_DBG("chan %p, control 0x%2.2x", chan, control);
411 count = min_t(unsigned int, conn->mtu, hlen);
412 control |= L2CAP_CTRL_FRAME_TYPE;
414 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
415 control |= L2CAP_CTRL_FINAL;
416 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
419 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
420 control |= L2CAP_CTRL_POLL;
421 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
424 skb = bt_skb_alloc(count, GFP_ATOMIC);
428 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
429 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
430 lh->cid = cpu_to_le16(chan->dcid);
431 put_unaligned_le16(control, skb_put(skb, 2));
433 if (chan->fcs == L2CAP_FCS_CRC16) {
/* CRC covers header + control (everything before the FCS field). */
434 u16 fcs = crc16(0, (u8 *)lh, count - 2);
435 put_unaligned_le16(fcs, skb_put(skb, 2));
438 if (lmp_no_flush_capable(conn->hcon->hdev))
439 flags = ACL_START_NO_FLUSH;
443 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send RR or RNR depending on local-busy state, stamping the current
 * receive sequence number into the ReqSeq field. */
446 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
448 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
449 control |= L2CAP_SUPER_RCV_NOT_READY;
450 chan->conn_state |= L2CAP_CONN_RNR_SENT;
452 control |= L2CAP_SUPER_RCV_READY;
454 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
456 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for this channel. */
459 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
461 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: once the peer's feature mask is
 * known, send a connect request (if security passes and none is
 * pending); otherwise first issue an info request for the feature
 * mask and arm the info timer. */
464 static void l2cap_do_start(struct l2cap_chan *chan)
466 struct l2cap_conn *conn = chan->conn;
468 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
469 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
472 if (l2cap_check_security(chan) &&
473 __l2cap_no_conn_pending(chan)) {
474 struct l2cap_conn_req req;
475 req.scid = cpu_to_le16(chan->scid);
478 chan->ident = l2cap_get_ident(conn);
479 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
481 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
485 struct l2cap_info_req req;
486 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
488 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
489 conn->info_ident = l2cap_get_ident(conn);
491 mod_timer(&conn->info_timer, jiffies +
492 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
494 l2cap_send_cmd(conn, conn->info_ident,
495 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check a requested mode against both local and remote feature masks;
 * non-ERTM/streaming modes handled in elided default branch. */
499 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
501 u32 local_feat_mask = l2cap_feat_mask;
503 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
506 case L2CAP_MODE_ERTM:
507 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
508 case L2CAP_MODE_STREAMING:
509 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for chan; stops ERTM timers first so no
 * retransmission fires mid-teardown, then moves the socket to
 * BT_DISCONN. `err` is presumably stored on the socket — elided. */
515 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
518 struct l2cap_disconn_req req;
525 if (chan->mode == L2CAP_MODE_ERTM) {
526 del_timer(&chan->retrans_timer);
527 del_timer(&chan->monitor_timer);
528 del_timer(&chan->ack_timer);
531 req.dcid = cpu_to_le16(chan->dcid);
532 req.scid = cpu_to_le16(chan->scid);
533 l2cap_send_cmd(conn, l2cap_get_ident(conn),
534 L2CAP_DISCONN_REQ, sizeof(req), &req);
536 sk->sk_state = BT_DISCONN;
262 540 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on `conn` forward:
 *  BT_CONNECT  -> send connect request (or close if mode unsupported);
 *  BT_CONNECT2 -> answer the peer's connect request per security and
 *                 defer_setup, then send the first config request.
 * NOTE(review): lock is taken with read_lock but dropped/retaken with
 * the _bh variants inside the unsupported-mode path — looks
 * inconsistent; verify against the upstream file. */
541 static void l2cap_conn_start(struct l2cap_conn *conn)
543 struct l2cap_chan *chan, *tmp;
545 BT_DBG("conn %p", conn);
547 read_lock(&conn->chan_lock);
549 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
550 struct sock *sk = chan->sk;
/* Only SEQPACKET/STREAM channels do the connect dance. */
554 if (sk->sk_type != SOCK_SEQPACKET &&
555 sk->sk_type != SOCK_STREAM) {
560 if (sk->sk_state == BT_CONNECT) {
561 struct l2cap_conn_req req;
563 if (!l2cap_check_security(chan) ||
564 !__l2cap_no_conn_pending(chan)) {
569 if (!l2cap_mode_supported(chan->mode,
571 && chan->conf_state &
572 L2CAP_CONF_STATE2_DEVICE) {
573 /* __l2cap_sock_close() calls list_del(chan)
574 * so release the lock */
575 read_unlock_bh(&conn->chan_lock);
576 __l2cap_sock_close(sk, ECONNRESET);
577 read_lock_bh(&conn->chan_lock);
582 req.scid = cpu_to_le16(chan->scid);
585 chan->ident = l2cap_get_ident(conn);
586 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
588 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
591 } else if (sk->sk_state == BT_CONNECT2) {
592 struct l2cap_conn_rsp rsp;
/* Response carries our dcid/scid swapped into the peer's view. */
594 rsp.scid = cpu_to_le16(chan->dcid);
595 rsp.dcid = cpu_to_le16(chan->scid);
597 if (l2cap_check_security(chan)) {
598 if (bt_sk(sk)->defer_setup) {
599 struct sock *parent = bt_sk(sk)->parent;
600 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
601 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
602 parent->sk_data_ready(parent, 0);
605 sk->sk_state = BT_CONFIG;
606 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
607 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
610 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
611 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
614 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only proceed to configuration after a successful response
 * and if we haven't already sent a config request. */
617 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
618 rsp.result != L2CAP_CR_SUCCESS) {
623 chan->conf_state |= L2CAP_CONF_REQ_SENT;
624 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
625 l2cap_build_conf_req(chan, buf), buf);
626 chan->num_conf_req++;
632 read_unlock(&conn->chan_lock);
635 /* Find socket with cid and source bdaddr.
636 * Returns closest match, locked.
/* Exact source-address match wins; a BDADDR_ANY listener is kept as
 * fallback (sk1). Returns sk when the loop matched, else sk1. */
638 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
640 struct sock *sk = NULL, *sk1 = NULL;
641 struct hlist_node *node;
643 read_lock(&l2cap_sk_list.lock);
645 sk_for_each(sk, node, &l2cap_sk_list.head) {
646 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
648 if (state && sk->sk_state != state)
651 if (chan->scid == cid) {
653 if (!bacmp(&bt_sk(sk)->src, src))
657 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
662 read_unlock(&l2cap_sk_list.lock);
664 return node ? sk : sk1;
/* Incoming LE connection: find a listener on the LE data CID, clone a
 * child socket+channel, link it into the connection and the listener's
 * accept queue, and mark it connected. Error unwinding is elided. */
667 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
669 struct sock *parent, *sk;
670 struct l2cap_chan *chan;
674 /* Check if we have socket listening on cid */
675 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
680 bh_lock_sock(parent);
682 /* Check for backlog size */
683 if (sk_acceptq_is_full(parent)) {
684 BT_DBG("backlog full %d", parent->sk_ack_backlog);
688 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
692 chan = l2cap_chan_alloc(sk);
698 l2cap_pi(sk)->chan = chan;
700 write_lock_bh(&conn->chan_lock);
/* Hold the ACL link for the lifetime of the channel (released in
 * l2cap_chan_del). */
702 hci_conn_hold(conn->hcon);
704 l2cap_sock_init(sk, parent);
706 bacpy(&bt_sk(sk)->src, conn->src);
707 bacpy(&bt_sk(sk)->dst, conn->dst);
709 bt_accept_enqueue(parent, sk);
711 __l2cap_chan_add(conn, chan);
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
715 sk->sk_state = BT_CONNECTED;
716 parent->sk_data_ready(parent, 0);
718 write_unlock_bh(&conn->chan_lock);
721 bh_unlock_sock(parent);
/* ACL/LE link came up: wake LE channels directly; on BR/EDR mark
 * non-stream sockets connected and start the connect state machine
 * for channels waiting in BT_CONNECT. */
724 static void l2cap_conn_ready(struct l2cap_conn *conn)
726 struct l2cap_chan *chan;
728 BT_DBG("conn %p", conn);
/* Incoming LE links get the listener path above. */
730 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
731 l2cap_le_conn_ready(conn);
733 read_lock(&conn->chan_lock);
735 list_for_each_entry(chan, &conn->chan_l, list) {
736 struct sock *sk = chan->sk;
740 if (conn->hcon->type == LE_LINK) {
741 l2cap_sock_clear_timer(sk);
742 sk->sk_state = BT_CONNECTED;
743 sk->sk_state_change(sk);
746 if (sk->sk_type != SOCK_SEQPACKET &&
747 sk->sk_type != SOCK_STREAM) {
748 l2cap_sock_clear_timer(sk);
749 sk->sk_state = BT_CONNECTED;
750 sk->sk_state_change(sk);
751 } else if (sk->sk_state == BT_CONNECT)
752 l2cap_do_start(chan);
757 read_unlock(&conn->chan_lock);
760 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate `err` to channels marked force_reliable (the actual
 * sk_err assignment is in elided lines — TODO confirm). */
761 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
763 struct l2cap_chan *chan;
765 BT_DBG("conn %p", conn);
767 read_lock(&conn->chan_lock);
769 list_for_each_entry(chan, &conn->chan_l, list) {
770 struct sock *sk = chan->sk;
772 if (chan->force_reliable)
776 read_unlock(&conn->chan_lock);
/* Info-request timer expiry: give up on the feature-mask exchange and
 * let channel setup proceed with what we have. */
779 static void l2cap_info_timeout(unsigned long arg)
781 struct l2cap_conn *conn = (void *) arg;
783 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
784 conn->info_ident = 0;
786 l2cap_conn_start(conn);
/* Create (or return existing) L2CAP state for an HCI connection:
 * allocates conn, picks the MTU from the LE or ACL controller limit,
 * wires up addresses/locks/lists and arms the info timer for BR/EDR
 * links. Early-return paths are elided. */
789 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
791 struct l2cap_conn *conn = hcon->l2cap_data;
796 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
800 hcon->l2cap_data = conn;
803 BT_DBG("hcon %p conn %p", hcon, conn);
805 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
806 conn->mtu = hcon->hdev->le_mtu;
808 conn->mtu = hcon->hdev->acl_mtu;
810 conn->src = &hcon->hdev->bdaddr;
811 conn->dst = &hcon->dst;
815 spin_lock_init(&conn->lock);
816 rwlock_init(&conn->chan_lock);
818 INIT_LIST_HEAD(&conn->chan_l);
/* LE links don't do the feature-mask info exchange. */
820 if (hcon->type != LE_LINK)
821 setup_timer(&conn->info_timer, l2cap_info_timeout,
822 (unsigned long) conn);
824 conn->disc_reason = 0x13;
/* Tear down L2CAP state when the HCI link dies: delete every channel
 * with `err`, stop the info timer if it was armed, free conn. */
829 static void l2cap_conn_del(struct hci_conn *hcon, int err)
831 struct l2cap_conn *conn = hcon->l2cap_data;
832 struct l2cap_chan *chan, *l;
838 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
840 kfree_skb(conn->rx_skb);
843 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
846 l2cap_chan_del(chan, err);
851 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
852 del_timer_sync(&conn->info_timer);
854 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add. */
858 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
860 write_lock_bh(&conn->chan_lock);
861 __l2cap_chan_add(conn, chan);
862 write_unlock_bh(&conn->chan_lock);
413 865 /* ---- Socket interface ---- */
867 /* Find socket with psm and source bdaddr.
868 * Returns closest match.
/* Same closest-match scheme as the SCID lookup: exact source address
 * preferred, BDADDR_ANY listener as fallback. */
870 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
872 struct sock *sk = NULL, *sk1 = NULL;
873 struct hlist_node *node;
875 read_lock(&l2cap_sk_list.lock);
877 sk_for_each(sk, node, &l2cap_sk_list.head) {
878 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
880 if (state && sk->sk_state != state)
883 if (chan->psm == psm) {
885 if (!bacmp(&bt_sk(sk)->src, src))
889 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
894 read_unlock(&l2cap_sk_list.lock);
896 return node ? sk : sk1;
/* Outgoing connect: route to an HCI device, open an LE or ACL link
 * depending on the destination CID, attach the channel, and either
 * finish immediately (link already up) or wait for conn_ready.
 * Error paths (hcon/conn failures) are elided. */
899 int l2cap_chan_connect(struct l2cap_chan *chan)
901 struct sock *sk = chan->sk;
902 bdaddr_t *src = &bt_sk(sk)->src;
903 bdaddr_t *dst = &bt_sk(sk)->dst;
904 struct l2cap_conn *conn;
905 struct hci_conn *hcon;
906 struct hci_dev *hdev;
910 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
913 hdev = hci_get_route(dst, src);
915 return -EHOSTUNREACH;
917 hci_dev_lock_bh(hdev);
919 auth_type = l2cap_get_auth_type(chan);
/* LE data CID means this is an LE (ATT) connection. */
921 if (chan->dcid == L2CAP_CID_LE_DATA)
922 hcon = hci_connect(hdev, LE_LINK, dst,
923 chan->sec_level, auth_type);
925 hcon = hci_connect(hdev, ACL_LINK, dst,
926 chan->sec_level, auth_type);
933 conn = l2cap_conn_add(hcon, 0);
940 /* Update source addr of the socket */
941 bacpy(src, conn->src);
943 l2cap_chan_add(conn, chan);
945 sk->sk_state = BT_CONNECT;
946 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
948 if (hcon->state == BT_CONNECTED) {
949 if (sk->sk_type != SOCK_SEQPACKET &&
950 sk->sk_type != SOCK_STREAM) {
951 l2cap_sock_clear_timer(sk);
952 if (l2cap_check_security(chan))
953 sk->sk_state = BT_CONNECTED;
955 l2cap_do_start(chan);
961 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until all ERTM frames are acked or the
 * connection goes away; classic wait-queue loop. Timeout setup and
 * the sock lock release/reacquire around schedule are elided. */
966 int __l2cap_wait_ack(struct sock *sk)
968 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
969 DECLARE_WAITQUEUE(wait, current);
973 add_wait_queue(sk_sleep(sk), &wait);
974 while ((chan->unacked_frames > 0 && chan->conn)) {
975 set_current_state(TASK_INTERRUPTIBLE);
980 if (signal_pending(current)) {
981 err = sock_intr_errno(timeo);
986 timeo = schedule_timeout(timeo);
989 err = sock_error(sk);
993 set_current_state(TASK_RUNNING);
994 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer expiry: peer stopped responding to our poll; either
 * give up (max retries exceeded -> disconnect) or poll again. */
998 static void l2cap_monitor_timeout(unsigned long arg)
1000 struct l2cap_chan *chan = (void *) arg;
1001 struct sock *sk = chan->sk;
1003 BT_DBG("chan %p", chan);
1006 if (chan->retry_count >= chan->remote_max_tx) {
1007 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1012 chan->retry_count++;
1013 __mod_monitor_timer();
1015 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Retransmission timer expiry: enter the WAIT_F recovery state and
 * poll the peer; the monitor timer now takes over. */
1019 static void l2cap_retrans_timeout(unsigned long arg)
1021 struct l2cap_chan *chan = (void *) arg;
1022 struct sock *sk = chan->sk;
1024 BT_DBG("chan %p", chan);
1027 chan->retry_count = 1;
1028 __mod_monitor_timer();
1030 chan->conn_state |= L2CAP_CONN_WAIT_F;
1032 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Pop acknowledged frames off tx_q up to expected_ack_seq; stop the
 * retransmission timer once nothing is outstanding. */
1036 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1038 struct sk_buff *skb;
1040 while ((skb = skb_peek(&chan->tx_q)) &&
1041 chan->unacked_frames) {
1042 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1045 skb = skb_dequeue(&chan->tx_q);
1048 chan->unacked_frames--;
1051 if (!chan->unacked_frames)
1052 del_timer(&chan->retrans_timer);
/* Final hop to HCI for data frames; honours the channel's flushable
 * flag when the controller supports non-flushable packets. */
1055 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1057 struct hci_conn *hcon = chan->conn->hcon;
1060 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1062 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1063 flags = ACL_START_NO_FLUSH;
1067 hci_send_acl(hcon, skb, flags);
/* Streaming mode: drain tx_q, stamping TxSeq (mod 64) and recomputing
 * FCS in place; no retransmission bookkeeping. */
1070 void l2cap_streaming_send(struct l2cap_chan *chan)
1072 struct sk_buff *skb;
1075 while ((skb = skb_dequeue(&chan->tx_q))) {
1076 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1077 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1078 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1080 if (chan->fcs == L2CAP_FCS_CRC16) {
1081 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1082 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1085 l2cap_do_send(chan, skb);
1087 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame whose TxSeq == tx_seq (SREJ
 * recovery): clone it, refresh F-bit/ReqSeq/TxSeq and FCS, resend.
 * Disconnects if the frame already hit remote_max_tx retries. */
1091 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1093 struct sk_buff *skb, *tx_skb;
1096 skb = skb_peek(&chan->tx_q);
1101 if (bt_cb(skb)->tx_seq == tx_seq)
1104 if (skb_queue_is_last(&chan->tx_q, skb))
1107 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1109 if (chan->remote_max_tx &&
1110 bt_cb(skb)->retries == chan->remote_max_tx) {
1111 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1115 tx_skb = skb_clone(skb, GFP_ATOMIC);
1116 bt_cb(skb)->retries++;
1117 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; sequence/ack fields are rebuilt below. */
1118 control &= L2CAP_CTRL_SAR;
1120 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1121 control |= L2CAP_CTRL_FINAL;
1122 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1125 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1126 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1128 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1130 if (chan->fcs == L2CAP_FCS_CRC16) {
1131 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1132 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1135 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the window
 * allows, updating per-frame retries/tx_seq and the unacked count.
 * NOTE(review): at lines 1173-1174 the FCS is computed from and
 * written into `skb` (the original) while the clone `tx_skb` is what
 * gets sent — upstream later fixed this to use tx_skb; flagged, not
 * changed, since surrounding lines are elided. */
1138 int l2cap_ertm_send(struct l2cap_chan *chan)
1140 struct sk_buff *skb, *tx_skb;
1141 struct sock *sk = chan->sk;
1145 if (sk->sk_state != BT_CONNECTED)
1148 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1150 if (chan->remote_max_tx &&
1151 bt_cb(skb)->retries == chan->remote_max_tx) {
1152 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1156 tx_skb = skb_clone(skb, GFP_ATOMIC);
1158 bt_cb(skb)->retries++;
1160 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1161 control &= L2CAP_CTRL_SAR;
1163 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1164 control |= L2CAP_CTRL_FINAL;
1165 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1167 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1168 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1169 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1172 if (chan->fcs == L2CAP_FCS_CRC16) {
1173 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1174 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1177 l2cap_do_send(chan, tx_skb);
1179 __mod_retrans_timer();
1181 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1182 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame: count it as unacked. */
1184 if (bt_cb(skb)->retries == 1)
1185 chan->unacked_frames++;
1187 chan->frames_sent++;
1189 if (skb_queue_is_last(&chan->tx_q, skb))
1190 chan->tx_send_head = NULL;
1192 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of tx_q and resend everything
 * from the last acknowledged sequence number. */
1200 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1204 if (!skb_queue_empty(&chan->tx_q))
1205 chan->tx_send_head = chan->tx_q.next;
1207 chan->next_tx_seq = chan->expected_ack_seq;
1208 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: RNR when locally busy, otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send > 0
 * means something was sent), falling back to an explicit RR. */
1212 static void l2cap_send_ack(struct l2cap_chan *chan)
1216 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1218 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1219 control |= L2CAP_SUPER_RCV_NOT_READY;
1220 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1221 l2cap_send_sframe(chan, control);
1225 if (l2cap_ertm_send(chan) > 0)
1228 control |= L2CAP_SUPER_RCV_READY;
1229 l2cap_send_sframe(chan, control);
/* Send an SREJ with the F-bit set for the last entry on the SREJ
 * list (tail of srej_l). */
1232 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1234 struct srej_list *tail;
1237 control = L2CAP_SUPER_SELECT_REJECT;
1238 control |= L2CAP_CTRL_FINAL;
1240 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1241 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1243 l2cap_send_sframe(chan, control);
/* Copy `len` bytes of user iovec into skb, spilling the remainder
 * into MTU-sized fragments chained on frag_list. Loop framing and
 * error returns are elided. */
1246 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1248 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1249 struct sk_buff **frag;
1252 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1258 /* Continuation fragments (no L2CAP header) */
1259 frag = &skb_shinfo(skb)->frag_list;
1261 count = min_t(unsigned int, conn->mtu, len);
1263 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1266 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1272 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload via l2cap_skbuff_fromiovec. hlen includes the
 * PSM field. Returns ERR_PTR on allocation/copy failure. */
1278 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1280 struct sock *sk = chan->sk;
1281 struct l2cap_conn *conn = chan->conn;
1282 struct sk_buff *skb;
1283 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1284 struct l2cap_hdr *lh;
1286 BT_DBG("sk %p len %d", sk, (int)len);
1288 count = min_t(unsigned int, (conn->mtu - hlen), len);
1289 skb = bt_skb_send_alloc(sk, count + hlen,
1290 msg->msg_flags & MSG_DONTWAIT, &err);
1292 return ERR_PTR(err);
1294 /* Create L2CAP header */
1295 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1296 lh->cid = cpu_to_le16(chan->dcid);
1297 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1298 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1300 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1301 if (unlikely(err < 0)) {
1303 return ERR_PTR(err);
/* Basic-mode B-frame: plain L2CAP header, no PSM/control fields. */
1308 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1310 struct sock *sk = chan->sk;
1311 struct l2cap_conn *conn = chan->conn;
1312 struct sk_buff *skb;
1313 int err, count, hlen = L2CAP_HDR_SIZE;
1314 struct l2cap_hdr *lh;
1316 BT_DBG("sk %p len %d", sk, (int)len);
1318 count = min_t(unsigned int, (conn->mtu - hlen), len);
1319 skb = bt_skb_send_alloc(sk, count + hlen,
1320 msg->msg_flags & MSG_DONTWAIT, &err);
1322 return ERR_PTR(err);
1324 /* Create L2CAP header */
1325 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1326 lh->cid = cpu_to_le16(chan->dcid);
1327 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1329 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1330 if (unlikely(err < 0)) {
1332 return ERR_PTR(err);
/* ERTM/streaming I-frame: header + 16-bit control, optional SDU
 * length (SAR start, when sdulen != 0 — guard elided) and an FCS
 * placeholder that the send paths fill in later. */
1337 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1339 struct sock *sk = chan->sk;
1340 struct l2cap_conn *conn = chan->conn;
1341 struct sk_buff *skb;
1342 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1343 struct l2cap_hdr *lh;
1345 BT_DBG("sk %p len %d", sk, (int)len);
1348 return ERR_PTR(-ENOTCONN);
/* FCS adds 2 bytes to hlen (increment line elided). */
1353 if (chan->fcs == L2CAP_FCS_CRC16)
1356 count = min_t(unsigned int, (conn->mtu - hlen), len);
1357 skb = bt_skb_send_alloc(sk, count + hlen,
1358 msg->msg_flags & MSG_DONTWAIT, &err);
1360 return ERR_PTR(err);
1362 /* Create L2CAP header */
1363 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1364 lh->cid = cpu_to_le16(chan->dcid);
1365 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1366 put_unaligned_le16(control, skb_put(skb, 2));
1368 put_unaligned_le16(sdulen, skb_put(skb, 2));
1370 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1371 if (unlikely(err < 0)) {
1373 return ERR_PTR(err);
1376 if (chan->fcs == L2CAP_FCS_CRC16)
1377 put_unaligned_le16(0, skb_put(skb, 2));
1379 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into START/CONTINUE/END
 * I-frames (the START frame carries the total length as sdulen),
 * building on a local queue so a mid-stream failure purges cleanly,
 * then splice onto tx_q. Loop framing is elided. */
1383 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1385 struct sk_buff *skb;
1386 struct sk_buff_head sar_queue;
1390 skb_queue_head_init(&sar_queue);
1391 control = L2CAP_SDU_START;
1392 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1394 return PTR_ERR(skb);
1396 __skb_queue_tail(&sar_queue, skb);
1397 len -= chan->remote_mps;
1398 size += chan->remote_mps;
1403 if (len > chan->remote_mps) {
1404 control = L2CAP_SDU_CONTINUE;
1405 buflen = chan->remote_mps;
1407 control = L2CAP_SDU_END;
/* Non-START frames carry sdulen == 0. */
1411 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1413 skb_queue_purge(&sar_queue);
1414 return PTR_ERR(skb);
1417 __skb_queue_tail(&sar_queue, skb);
1421 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1422 if (chan->tx_send_head == NULL)
1423 chan->tx_send_head = sar_queue.next;
/* Channel finished configuration: clear config state, stop the setup
 * timer, and wake either the connecting socket or the listener. */
1428 static void l2cap_chan_ready(struct sock *sk)
1430 struct sock *parent = bt_sk(sk)->parent;
1431 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1433 BT_DBG("sk %p, parent %p", sk, parent);
1435 chan->conf_state = 0;
1436 l2cap_sock_clear_timer(sk);
1439 /* Outgoing channel.
1440 * Wake up socket sleeping on connect.
1442 sk->sk_state = BT_CONNECTED;
1443 sk->sk_state_change(sk);
1445 /* Incoming channel.
1446 * Wake up socket sleeping on accept.
1448 parent->sk_data_ready(parent, 0);
1452 /* Copy frame to all raw sockets on that connection */
/* Clone `skb` to every SOCK_RAW channel on the connection, skipping
 * the originating socket (check elided). */
1453 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1455 struct sk_buff *nskb;
1456 struct l2cap_chan *chan;
1458 BT_DBG("conn %p", conn);
1460 read_lock(&conn->chan_lock);
1461 list_for_each_entry(chan, &conn->chan_l, list) {
1462 struct sock *sk = chan->sk;
1463 if (sk->sk_type != SOCK_RAW)
1466 /* Don't send frame to the socket it came from */
1469 nskb = skb_clone(skb, GFP_ATOMIC);
1473 if (sock_queue_rcv_skb(sk, nskb))
1476 read_unlock(&conn->chan_lock);
1479 /* ---- L2CAP signalling commands ---- */
1480 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1481 u8 code, u8 ident, u16 dlen, void *data)
1483 struct sk_buff *skb, **frag;
1484 struct l2cap_cmd_hdr *cmd;
1485 struct l2cap_hdr *lh;
1488 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1489 conn, code, ident, dlen);
1491 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1492 count = min_t(unsigned int, conn->mtu, len);
1494 skb = bt_skb_alloc(count, GFP_ATOMIC);
1498 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1499 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1501 if (conn->hcon->type == LE_LINK)
1502 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1504 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1506 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1509 cmd->len = cpu_to_le16(dlen);
1512 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1513 memcpy(skb_put(skb, count), data, count);
1519 /* Continuation fragments (no L2CAP header) */
1520 frag = &skb_shinfo(skb)->frag_list;
1522 count = min_t(unsigned int, conn->mtu, len);
1524 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1528 memcpy(skb_put(*frag, count), data, count);
1533 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its type/len and the
 * value widened into *val (1/2/4-byte values by size, anything else as a
 * pointer to the raw bytes). Caller advances by the returned length. */
1543 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1545 struct l2cap_conf_opt *opt = *ptr;
1548 len = L2CAP_CONF_OPT_SIZE + opt->len;
1556 *val = *((u8 *) opt->val);
/* 2- and 4-byte options may be unaligned on the wire. */
1560 *val = get_unaligned_le16(opt->val);
1564 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the option payload. */
1568 *val = (unsigned long) opt->val;
1572 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr and advance the
 * pointer. `val` is either the value itself (len 1/2/4) or, for larger
 * options, a pointer to the bytes to copy. */
1576 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1578 struct l2cap_conf_opt *opt = *ptr;
1580 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1587 *((u8 *) opt->val) = val;
1591 put_unaligned_le16(val, opt->val);
1595 put_unaligned_le32(val, opt->val);
/* len > 4: val is really a pointer to the option payload. */
1599 memcpy(opt->val, (void *) val, len);
1603 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement for the channel.
 * Runs in timer (BH) context, hence bh_lock_sock. */
1606 static void l2cap_ack_timeout(unsigned long arg)
1608 struct l2cap_chan *chan = (void *) arg;
1610 bh_lock_sock(chan->sk);
1611 l2cap_send_ack(chan);
1612 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state: sequence counters, the retransmit /
 * monitor / ack timers, the SREJ and busy queues, and the busy workqueue
 * item. Also reroutes the socket backlog to the ERTM receive path. */
1615 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1617 struct sock *sk = chan->sk;
1619 chan->expected_ack_seq = 0;
1620 chan->unacked_frames = 0;
1621 chan->buffer_seq = 0;
1622 chan->num_acked = 0;
1623 chan->frames_sent = 0;
1625 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1626 (unsigned long) chan);
1627 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1628 (unsigned long) chan);
1629 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1631 skb_queue_head_init(&chan->srej_q);
1632 skb_queue_head_init(&chan->busy_q);
1634 INIT_LIST_HEAD(&chan->srej_l);
1636 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Frames arriving while the socket is owned by user context are pushed
 * through the ERTM-aware backlog handler. */
1638 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode. */
1641 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1644 case L2CAP_MODE_STREAMING:
1645 case L2CAP_MODE_ERTM:
1646 if (l2cap_mode_supported(mode, remote_feat_mask))
1650 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request for `chan` into `data`.
 * Emits MTU (if non-default), an RFC option describing the chosen mode,
 * and optionally an FCS option. Returns the request length (return
 * statement elided in this view). */
1654 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1656 struct l2cap_conf_req *req = data;
1657 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1658 void *ptr = req->data;
1660 BT_DBG("chan %p", chan);
/* Mode is only (re)negotiated on the first request/response exchange. */
1662 if (chan->num_conf_req || chan->num_conf_rsp)
1665 switch (chan->mode) {
1666 case L2CAP_MODE_STREAMING:
1667 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE means the mode was mandated locally and must not be
 * downgraded by remote feature negotiation. */
1668 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1673 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1678 if (chan->imtu != L2CAP_DEFAULT_MTU)
1679 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1681 switch (chan->mode) {
1682 case L2CAP_MODE_BASIC:
/* Basic mode needs no RFC option unless the peer knows about
 * ERTM/streaming, in which case an explicit basic RFC is sent. */
1683 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1684 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1687 rfc.mode = L2CAP_MODE_BASIC;
1689 rfc.max_transmit = 0;
1690 rfc.retrans_timeout = 0;
1691 rfc.monitor_timeout = 0;
1692 rfc.max_pdu_size = 0;
1694 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1695 (unsigned long) &rfc);
1698 case L2CAP_MODE_ERTM:
1699 rfc.mode = L2CAP_MODE_ERTM;
1700 rfc.txwin_size = chan->tx_win;
1701 rfc.max_transmit = chan->max_tx;
1702 rfc.retrans_timeout = 0;
1703 rfc.monitor_timeout = 0;
1704 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp PDU size so a full PDU plus per-frame overhead fits in the
 * connection MTU ("10" presumably covers header+control+FCS — confirm). */
1705 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1706 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1708 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1709 (unsigned long) &rfc);
1711 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Advertise "no FCS" when we don't want one and the peer agreed. */
1714 if (chan->fcs == L2CAP_FCS_NONE ||
1715 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1716 chan->fcs = L2CAP_FCS_NONE;
1717 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1721 case L2CAP_MODE_STREAMING:
1722 rfc.mode = L2CAP_MODE_STREAMING;
1724 rfc.max_transmit = 0;
1725 rfc.retrans_timeout = 0;
1726 rfc.monitor_timeout = 0;
1727 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1728 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1729 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1731 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1732 (unsigned long) &rfc);
1734 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1737 if (chan->fcs == L2CAP_FCS_NONE ||
1738 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1739 chan->fcs = L2CAP_FCS_NONE;
1740 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1745 req->dcid = cpu_to_le16(chan->dcid);
1746 req->flags = cpu_to_le16(0);
/* Parse the accumulated remote configuration request (chan->conf_req /
 * conf_len) and build the response into `data`. Walks each option,
 * records MTU / flush timeout / RFC / FCS preferences, then validates
 * the negotiated mode and fills in our counter-proposals. Returns the
 * response length or -ECONNREFUSED on an unacceptable mode. */
1751 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1753 struct l2cap_conf_rsp *rsp = data;
1754 void *ptr = rsp->data;
1755 void *req = chan->conf_req;
1756 int len = chan->conf_len;
1757 int type, hint, olen;
1759 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1760 u16 mtu = L2CAP_DEFAULT_MTU;
1761 u16 result = L2CAP_CONF_SUCCESS;
1763 BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent. */
1765 while (len >= L2CAP_CONF_OPT_SIZE) {
1766 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hinted options may be silently ignored; non-hints must be understood. */
1768 hint = type & L2CAP_CONF_HINT;
1769 type &= L2CAP_CONF_MASK;
1772 case L2CAP_CONF_MTU:
1776 case L2CAP_CONF_FLUSH_TO:
1777 chan->flush_to = val;
1780 case L2CAP_CONF_QOS:
1783 case L2CAP_CONF_RFC:
1784 if (olen == sizeof(rfc))
1785 memcpy(&rfc, (void *) val, olen);
1788 case L2CAP_CONF_FCS:
1789 if (val == L2CAP_FCS_NONE)
1790 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject, listing the offending type. */
1798 result = L2CAP_CONF_UNKNOWN;
1799 *((u8 *) ptr++) = type;
1804 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1807 switch (chan->mode) {
1808 case L2CAP_MODE_STREAMING:
1809 case L2CAP_MODE_ERTM:
1810 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1811 chan->mode = l2cap_select_mode(rfc.mode,
1812 chan->conn->feat_mask);
/* Locally-mandated mode that the remote didn't offer: refuse. */
1816 if (chan->mode != rfc.mode)
1817 return -ECONNREFUSED;
1823 if (chan->mode != rfc.mode) {
1824 result = L2CAP_CONF_UNACCEPT;
1825 rfc.mode = chan->mode;
/* Only one renegotiation round is allowed before giving up. */
1827 if (chan->num_conf_rsp == 1)
1828 return -ECONNREFUSED;
1830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1831 sizeof(rfc), (unsigned long) &rfc);
1835 if (result == L2CAP_CONF_SUCCESS) {
1836 /* Configure output options and let the other side know
1837 * which ones we don't like. */
1839 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1840 result = L2CAP_CONF_UNACCEPT;
1843 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1845 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1848 case L2CAP_MODE_BASIC:
1849 chan->fcs = L2CAP_FCS_NONE;
1850 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1853 case L2CAP_MODE_ERTM:
1854 chan->remote_tx_win = rfc.txwin_size;
1855 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what fits our ACL MTU. */
1857 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1858 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1860 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu() on host constants looks inverted —
 * cpu_to_le16() would be expected here; confirm against mainline. */
1862 rfc.retrans_timeout =
1863 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1864 rfc.monitor_timeout =
1865 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1867 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1869 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1870 sizeof(rfc), (unsigned long) &rfc);
1874 case L2CAP_MODE_STREAMING:
1875 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1876 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1878 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1880 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1882 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1883 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: refuse and echo our supported mode back. */
1888 result = L2CAP_CONF_UNACCEPT;
1890 memset(&rfc, 0, sizeof(rfc));
1891 rfc.mode = chan->mode;
1894 if (result == L2CAP_CONF_SUCCESS)
1895 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1897 rsp->scid = cpu_to_le16(chan->dcid);
1898 rsp->result = cpu_to_le16(result);
1899 rsp->flags = cpu_to_le16(0x0000);
/* Parse the remote's configuration RESPONSE (`rsp`, `len` bytes) and build
 * the follow-up request into `data`, adjusting our channel parameters to
 * the values the remote would accept. *result carries the running config
 * outcome. Returns the new request length or -ECONNREFUSED. */
1904 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1906 struct l2cap_conf_req *req = data;
1907 void *ptr = req->data;
1910 struct l2cap_conf_rfc rfc;
1912 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
1914 while (len >= L2CAP_CONF_OPT_SIZE) {
1915 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1918 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: counter with the
 * minimum and mark the exchange unaccepted. */
1919 if (val < L2CAP_DEFAULT_MIN_MTU) {
1920 *result = L2CAP_CONF_UNACCEPT;
1921 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1924 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1927 case L2CAP_CONF_FLUSH_TO:
1928 chan->flush_to = val;
1929 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1933 case L2CAP_CONF_RFC:
1934 if (olen == sizeof(rfc))
1935 memcpy(&rfc, (void *)val, olen);
/* A locally-mandated mode cannot be changed by the remote. */
1937 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1938 rfc.mode != chan->mode)
1939 return -ECONNREFUSED;
1943 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1944 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be upgraded by the peer's counter-proposal. */
1949 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1950 return -ECONNREFUSED;
1952 chan->mode = rfc.mode;
/* On success, latch the negotiated ERTM/streaming timing parameters. */
1954 if (*result == L2CAP_CONF_SUCCESS) {
1956 case L2CAP_MODE_ERTM:
1957 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1958 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1959 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1961 case L2CAP_MODE_STREAMING:
1962 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1966 req->dcid = cpu_to_le16(chan->dcid);
1967 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configure-response header (scid/result/flags) into `data`.
 * Used for empty / reject responses that carry no options. */
1972 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
1974 struct l2cap_conf_rsp *rsp = data;
1975 void *ptr = rsp->data;
1977 BT_DBG("chan %p", chan);
/* scid in the response is the remote's channel id, i.e. our dcid. */
1979 rsp->scid = cpu_to_le16(chan->dcid);
1980 rsp->result = cpu_to_le16(result);
1981 rsp->flags = cpu_to_le16(flags);
/* Complete a connection that was deferred (e.g. pending security/accept):
 * send the successful connect response now, and kick off configuration
 * with our first config request unless one was already sent. */
1986 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
1988 struct l2cap_conn_rsp rsp;
1989 struct l2cap_conn *conn = chan->conn;
1992 rsp.scid = cpu_to_le16(chan->dcid);
1993 rsp.dcid = cpu_to_le16(chan->scid);
1994 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1995 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reuse the ident saved from the original connect request. */
1996 l2cap_send_cmd(conn, chan->ident,
1997 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1999 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2002 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2003 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2004 l2cap_build_conf_req(chan, buf), buf);
2005 chan->num_conf_req++;
/* Extract the RFC option from a successful configure response and latch
 * the agreed ERTM/streaming parameters (timeouts, MPS) on the channel.
 * No-op for basic-mode channels. */
2008 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2012 struct l2cap_conf_rfc rfc;
2014 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2016 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2019 while (len >= L2CAP_CONF_OPT_SIZE) {
2020 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2023 case L2CAP_CONF_RFC:
2024 if (olen == sizeof(rfc))
2025 memcpy(&rfc, (void *)val, olen);
2032 case L2CAP_MODE_ERTM:
2033 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2034 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2035 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2037 case L2CAP_MODE_STREAMING:
2038 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * information request, treat the feature exchange as done and resume
 * pending channel setup. */
2042 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2044 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2046 if (rej->reason != 0x0000)
2049 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2050 cmd->ident == conn->info_ident) {
2051 del_timer(&conn->info_timer);
2053 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2054 conn->info_ident = 0;
2056 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening socket for the
 * PSM, enforce link security, allocate a child socket/channel, register it
 * on the connection, and answer with success / pending / an error result.
 * May additionally trigger an information request or our first config
 * request. */
2062 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2064 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2065 struct l2cap_conn_rsp rsp;
2066 struct l2cap_chan *chan = NULL;
2067 struct sock *parent, *sk = NULL;
2068 int result, status = L2CAP_CS_NO_INFO;
2070 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2071 __le16 psm = req->psm;
2073 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2075 /* Check if we have socket listening on psm */
2076 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2078 result = L2CAP_CR_BAD_PSM;
2082 bh_lock_sock(parent);
2084 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2085 if (psm != cpu_to_le16(0x0001) &&
2086 !hci_conn_check_link_mode(conn->hcon)) {
2087 conn->disc_reason = 0x05;
2088 result = L2CAP_CR_SEC_BLOCK;
2092 result = L2CAP_CR_NO_MEM;
2094 /* Check for backlog size */
2095 if (sk_acceptq_is_full(parent)) {
2096 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2100 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2104 chan = l2cap_chan_alloc(sk);
2106 l2cap_sock_kill(sk);
2110 l2cap_pi(sk)->chan = chan;
2112 write_lock_bh(&conn->chan_lock);
2114 /* Check if we already have channel with that dcid */
2115 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2116 write_unlock_bh(&conn->chan_lock);
2117 sock_set_flag(sk, SOCK_ZAPPED);
2118 l2cap_sock_kill(sk);
2122 hci_conn_hold(conn->hcon);
2124 l2cap_sock_init(sk, parent);
2125 bacpy(&bt_sk(sk)->src, conn->src);
2126 bacpy(&bt_sk(sk)->dst, conn->dst);
2130 bt_accept_enqueue(parent, sk);
2132 __l2cap_chan_add(conn, chan);
2136 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it. */
2138 chan->ident = cmd->ident;
2140 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2141 if (l2cap_check_security(chan)) {
/* defer_setup: userspace must accept() before we answer success. */
2142 if (bt_sk(sk)->defer_setup) {
2143 sk->sk_state = BT_CONNECT2;
2144 result = L2CAP_CR_PEND;
2145 status = L2CAP_CS_AUTHOR_PEND;
2146 parent->sk_data_ready(parent, 0);
2148 sk->sk_state = BT_CONFIG;
2149 result = L2CAP_CR_SUCCESS;
2150 status = L2CAP_CS_NO_INFO;
2153 sk->sk_state = BT_CONNECT2;
2154 result = L2CAP_CR_PEND;
2155 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer pending for now. */
2158 sk->sk_state = BT_CONNECT2;
2159 result = L2CAP_CR_PEND;
2160 status = L2CAP_CS_NO_INFO;
2163 write_unlock_bh(&conn->chan_lock);
2166 bh_unlock_sock(parent);
2169 rsp.scid = cpu_to_le16(scid);
2170 rsp.dcid = cpu_to_le16(dcid);
2171 rsp.result = cpu_to_le16(result);
2172 rsp.status = cpu_to_le16(status);
2173 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: fire off the feature-mask information request. */
2175 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2176 struct l2cap_info_req info;
2177 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2179 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2180 conn->info_ident = l2cap_get_ident(conn);
2182 mod_timer(&conn->info_timer, jiffies +
2183 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2185 l2cap_send_cmd(conn, conn->info_ident,
2186 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: start configuration right away. */
2189 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2190 result == L2CAP_CR_SUCCESS) {
2192 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2193 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2194 l2cap_build_conf_req(chan, buf), buf);
2195 chan->num_conf_req++;
/* Handle an incoming Connection Response: locate the channel by scid (or,
 * while still pending, by request ident), then either move to BT_CONFIG
 * and begin configuration, stay pending, or tear the channel down. */
2201 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2203 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2204 u16 scid, dcid, result, status;
2205 struct l2cap_chan *chan;
2209 scid = __le16_to_cpu(rsp->scid);
2210 dcid = __le16_to_cpu(rsp->dcid);
2211 result = __le16_to_cpu(rsp->result);
2212 status = __le16_to_cpu(rsp->status);
2214 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2217 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid of 0 is valid while the request is still pending; fall back to
 * matching by the command ident. */
2221 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2229 case L2CAP_CR_SUCCESS:
2230 sk->sk_state = BT_CONFIG;
2233 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2235 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2238 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2240 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2241 l2cap_build_conf_req(chan, req), req);
2242 chan->num_conf_req++;
2246 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2250 /* don't delete l2cap channel if sk is owned by user */
/* User holds the socket lock: defer teardown via a short timer. */
2251 if (sock_owned_by_user(sk)) {
2252 sk->sk_state = BT_DISCONN;
2253 l2cap_sock_clear_timer(sk);
2254 l2cap_sock_set_timer(sk, HZ / 5);
2258 l2cap_chan_del(chan, ECONNREFUSED);
/* Choose the channel's default frame-check-sequence setting. */
2266 static inline void set_default_fcs(struct l2cap_chan *chan)
2268 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2270 /* FCS is enabled only in ERTM or streaming mode, if one or both
2273 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2274 chan->fcs = L2CAP_FCS_NONE;
/* Neither side asked to drop FCS, so CRC16 stays on. */
2275 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2276 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate option data across
 * continuation packets into chan->conf_req, and once complete, parse it,
 * send our response, and — when both directions are configured — bring
 * the channel up (BT_CONNECTED, ERTM init). */
2279 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2281 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2284 struct l2cap_chan *chan;
2288 dcid = __le16_to_cpu(req->dcid);
2289 flags = __le16_to_cpu(req->flags);
2291 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2293 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense in BT_CONFIG; otherwise reject with
 * "invalid CID" (reason 0x0002). */
2299 if (sk->sk_state != BT_CONFIG) {
2300 struct l2cap_cmd_rej rej;
2302 rej.reason = cpu_to_le16(0x0002);
2303 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2308 /* Reject if config buffer is too small. */
2309 len = cmd_len - sizeof(*req);
2310 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2311 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2312 l2cap_build_conf_rsp(chan, rsp,
2313 L2CAP_CONF_REJECT, flags), rsp);
2318 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2319 chan->conf_len += len;
/* Continuation flag set: more option data is coming; ack and wait. */
2321 if (flags & 0x0001) {
2322 /* Incomplete config. Send empty response. */
2323 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2324 l2cap_build_conf_rsp(chan, rsp,
2325 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2329 /* Complete config. */
2330 len = l2cap_parse_conf_req(chan, rsp);
2332 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2336 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2337 chan->num_conf_rsp++;
2339 /* Reset config buffer. */
2342 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions done: the channel is fully configured. */
2345 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2346 set_default_fcs(chan);
2348 sk->sk_state = BT_CONNECTED;
2350 chan->next_tx_seq = 0;
2351 chan->expected_tx_seq = 0;
2352 skb_queue_head_init(&chan->tx_q);
2353 if (chan->mode == L2CAP_MODE_ERTM)
2354 l2cap_ertm_init(chan);
2356 l2cap_chan_ready(sk);
/* We accepted theirs but haven't sent ours yet: do it now. */
2360 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2362 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2363 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2364 l2cap_build_conf_req(chan, buf), buf);
2365 chan->num_conf_req++;
/* Handle an incoming Configure Response: on success latch RFC parameters;
 * on "unaccept" retry with adjusted options (bounded retries); otherwise
 * tear the channel down. Completing input config may bring the channel to
 * BT_CONNECTED. */
2373 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2375 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2376 u16 scid, flags, result;
2377 struct l2cap_chan *chan;
2379 int len = cmd->len - sizeof(*rsp);
2381 scid = __le16_to_cpu(rsp->scid);
2382 flags = __le16_to_cpu(rsp->flags);
2383 result = __le16_to_cpu(rsp->result);
2385 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2386 scid, flags, result);
2388 chan = l2cap_get_chan_by_scid(conn, scid);
2395 case L2CAP_CONF_SUCCESS:
2396 l2cap_conf_rfc_get(chan, rsp->data, len);
2399 case L2CAP_CONF_UNACCEPT:
/* Bounded retries: only renegotiate up to L2CAP_CONF_MAX_CONF_RSP. */
2400 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2403 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2404 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2408 /* throw out any old stored conf requests */
2409 result = L2CAP_CONF_SUCCESS;
2410 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2413 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2417 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2418 L2CAP_CONF_REQ, len, req);
2419 chan->num_conf_req++;
2420 if (result != L2CAP_CONF_SUCCESS)
/* Reject or retries exhausted: report reset and disconnect. */
2426 sk->sk_err = ECONNRESET;
2427 l2cap_sock_set_timer(sk, HZ * 5);
2428 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2435 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions done: the channel is fully configured. */
2437 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2438 set_default_fcs(chan);
2440 sk->sk_state = BT_CONNECTED;
2441 chan->next_tx_seq = 0;
2442 chan->expected_tx_seq = 0;
2443 skb_queue_head_init(&chan->tx_q);
2444 if (chan->mode == L2CAP_MODE_ERTM)
2445 l2cap_ertm_init(chan);
2447 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a response,
 * shut the socket down, and delete the channel — deferred via timer if
 * userspace currently owns the socket lock. */
2455 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2457 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2458 struct l2cap_disconn_rsp rsp;
2460 struct l2cap_chan *chan;
2463 scid = __le16_to_cpu(req->scid);
2464 dcid = __le16_to_cpu(req->dcid);
2466 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our local cid, so look up by that. */
2468 chan = l2cap_get_chan_by_scid(conn, dcid);
2474 rsp.dcid = cpu_to_le16(chan->scid);
2475 rsp.scid = cpu_to_le16(chan->dcid);
2476 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2478 sk->sk_shutdown = SHUTDOWN_MASK;
2480 /* don't delete l2cap channel if sk is owned by user */
2481 if (sock_owned_by_user(sk)) {
2482 sk->sk_state = BT_DISCONN;
2483 l2cap_sock_clear_timer(sk);
2484 l2cap_sock_set_timer(sk, HZ / 5);
2489 l2cap_chan_del(chan, ECONNRESET);
2492 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response to our own request: delete the
 * channel (deferred via timer if userspace owns the socket lock). */
2496 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2498 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2500 struct l2cap_chan *chan;
2503 scid = __le16_to_cpu(rsp->scid);
2504 dcid = __le16_to_cpu(rsp->dcid);
2506 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2508 chan = l2cap_get_chan_by_scid(conn, scid);
2514 /* don't delete l2cap channel if sk is owned by user */
2515 if (sock_owned_by_user(sk)) {
2516 sk->sk_state = BT_DISCONN;
2517 l2cap_sock_clear_timer(sk);
2518 l2cap_sock_set_timer(sk, HZ / 5);
/* err 0: this is a clean, locally-initiated disconnect completing. */
2523 l2cap_chan_del(chan, 0);
2526 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries with success, anything else with NOTSUPP. */
2530 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2532 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2535 type = __le16_to_cpu(req->type);
2537 BT_DBG("type 0x%4.4x", type);
2539 if (type == L2CAP_IT_FEAT_MASK) {
2541 u32 feat_mask = l2cap_feat_mask;
2542 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2543 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2544 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM and streaming on top of the static feature mask. */
2546 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2548 put_unaligned_le32(feat_mask, rsp->data);
2549 l2cap_send_cmd(conn, cmd->ident,
2550 L2CAP_INFO_RSP, sizeof(buf), buf);
2551 } else if (type == L2CAP_IT_FIXED_CHAN) {
2553 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2554 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2555 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header. */
2556 memcpy(buf + 4, l2cap_fixed_chan, 8);
2557 l2cap_send_cmd(conn, cmd->ident,
2558 L2CAP_INFO_RSP, sizeof(buf), buf);
2560 struct l2cap_info_rsp rsp;
2561 rsp.type = cpu_to_le16(type);
2562 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2563 l2cap_send_cmd(conn, cmd->ident,
2564 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response. Validates it against our
 * outstanding request, records the remote feature mask, optionally chains
 * a fixed-channel query, then marks the exchange done and resumes pending
 * connection setup. */
2570 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2572 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2575 type = __le16_to_cpu(rsp->type);
2576 result = __le16_to_cpu(rsp->result);
2578 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2580 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2581 if (cmd->ident != conn->info_ident ||
2582 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2585 del_timer(&conn->info_timer);
/* Remote refused: give up on the exchange and start channels anyway. */
2587 if (result != L2CAP_IR_SUCCESS) {
2588 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2589 conn->info_ident = 0;
2591 l2cap_conn_start(conn);
2596 if (type == L2CAP_IT_FEAT_MASK) {
2597 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask which ones before
 * finishing the exchange. */
2599 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2600 struct l2cap_info_req req;
2601 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2603 conn->info_ident = l2cap_get_ident(conn);
2605 l2cap_send_cmd(conn, conn->info_ident,
2606 L2CAP_INFO_REQ, sizeof(req), &req);
2608 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2609 conn->info_ident = 0;
2611 l2cap_conn_start(conn);
2613 } else if (type == L2CAP_IT_FIXED_CHAN) {
2614 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2615 conn->info_ident = 0;
2617 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the ranges the
 * code enforces (interval 6..3200, supervision multiplier 10..3200,
 * latency <= 499 and below the derived maximum). Non-zero means invalid. */
2623 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2628 if (min > max || min < 6 || max > 3200)
2631 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
2634 if (max >= to_multiplier * 8)
2637 max_latency = (to_multiplier * 8 / max) - 1;
2638 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate length and parameter ranges, answer accepted/rejected, and on
 * acceptance ask the controller to update the connection. */
2644 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2645 struct l2cap_cmd_hdr *cmd, u8 *data)
2647 struct hci_conn *hcon = conn->hcon;
2648 struct l2cap_conn_param_update_req *req;
2649 struct l2cap_conn_param_update_rsp rsp;
2650 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may grant parameter updates. */
2653 if (!(hcon->link_mode & HCI_LM_MASTER))
2656 cmd_len = __le16_to_cpu(cmd->len);
2657 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2660 req = (struct l2cap_conn_param_update_req *) data;
2661 min = __le16_to_cpu(req->min);
2662 max = __le16_to_cpu(req->max);
2663 latency = __le16_to_cpu(req->latency);
2664 to_multiplier = __le16_to_cpu(req->to_multiplier);
2666 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2667 min, max, latency, to_multiplier);
2669 memset(&rsp, 0, sizeof(rsp));
2671 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2673 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2675 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2677 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the HCI layer. */
2681 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged. */
2686 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2687 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2691 switch (cmd->code) {
2692 case L2CAP_COMMAND_REJ:
2693 l2cap_command_rej(conn, cmd, data);
2696 case L2CAP_CONN_REQ:
2697 err = l2cap_connect_req(conn, cmd, data);
2700 case L2CAP_CONN_RSP:
2701 err = l2cap_connect_rsp(conn, cmd, data);
2704 case L2CAP_CONF_REQ:
2705 err = l2cap_config_req(conn, cmd, cmd_len, data);
2708 case L2CAP_CONF_RSP:
2709 err = l2cap_config_rsp(conn, cmd, data);
2712 case L2CAP_DISCONN_REQ:
2713 err = l2cap_disconnect_req(conn, cmd, data);
2716 case L2CAP_DISCONN_RSP:
2717 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: bounce the payload straight back with the same ident. */
2720 case L2CAP_ECHO_REQ:
2721 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2724 case L2CAP_ECHO_RSP:
2727 case L2CAP_INFO_REQ:
2728 err = l2cap_information_req(conn, cmd, data);
2731 case L2CAP_INFO_RSP:
2732 err = l2cap_information_rsp(conn, cmd, data);
2736 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command. Only the connection-parameter
 * update request is actively handled here. */
2744 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2745 struct l2cap_cmd_hdr *cmd, u8 *data)
2747 switch (cmd->code) {
2748 case L2CAP_COMMAND_REJ:
2751 case L2CAP_CONN_PARAM_UPDATE_REQ:
2752 return l2cap_conn_param_update_req(conn, cmd, data);
2754 case L2CAP_CONN_PARAM_UPDATE_RSP:
2758 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signalling-channel skb: mirror it to raw sockets, then walk
 * the packet command-by-command, dispatching each to the LE or BR/EDR
 * handler and answering failures with a Command Reject. */
2763 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2764 struct sk_buff *skb)
2766 u8 *data = skb->data;
2768 struct l2cap_cmd_hdr cmd;
2771 l2cap_raw_recv(conn, skb);
/* A single skb may carry several concatenated commands. */
2773 while (len >= L2CAP_CMD_HDR_SIZE) {
2775 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2776 data += L2CAP_CMD_HDR_SIZE;
2777 len -= L2CAP_CMD_HDR_SIZE;
2779 cmd_len = le16_to_cpu(cmd.len);
2781 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* ident 0 is illegal and a length beyond the packet means corruption. */
2783 if (cmd_len > len || !cmd.ident) {
2784 BT_DBG("corrupted command");
2788 if (conn->hcon->type == LE_LINK)
2789 err = l2cap_le_sig_cmd(conn, &cmd, data);
2791 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2794 struct l2cap_cmd_rej rej;
2796 BT_ERR("Wrong link type (%d)", err);
2798 /* FIXME: Map err to a valid reason */
2799 rej.reason = cpu_to_le16(0);
2800 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS of a received ERTM/streaming frame. The FCS is the
 * trailing 2 bytes; it is trimmed off and recomputed over the L2CAP header
 * plus control field plus payload. Non-zero return means mismatch. */
2810 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
2812 u16 our_fcs, rcv_fcs;
/* L2CAP header + 2-byte control field precede skb->data. */
2813 int hdr_size = L2CAP_HDR_SIZE + 2;
2815 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim first, then read the FCS from just past the new tail. */
2816 skb_trim(skb, skb->len - 2);
2817 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2818 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2820 if (our_fcs != rcv_fcs)
/* After a poll/final exchange, resume transmission: send RNR if we are
 * locally busy, retransmit if the remote was busy, push pending I-frames,
 * and fall back to an RR if nothing at all was sent. */
2826 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2830 chan->frames_sent = 0;
2832 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2834 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2835 control |= L2CAP_SUPER_RCV_NOT_READY;
2836 l2cap_send_sframe(chan, control);
2837 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2840 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2841 l2cap_retransmit_frames(chan);
2843 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: keep the peer informed with RR. */
2845 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2846 chan->frames_sent == 0) {
2847 control |= L2CAP_SUPER_RCV_READY;
2848 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Duplicate tx_seq values are detected via the equality check below. */
2852 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2854 struct sk_buff *next_skb;
2855 int tx_seq_offset, next_tx_seq_offset;
2857 bt_cb(skb)->tx_seq = tx_seq;
2858 bt_cb(skb)->sar = sar;
2860 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: trivially append. */
2862 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are distances from buffer_seq in the 0..63 sequence space. */
2866 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2867 if (tx_seq_offset < 0)
2868 tx_seq_offset += 64;
2871 if (bt_cb(next_skb)->tx_seq == tx_seq)
2874 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2875 chan->buffer_seq) % 64;
2876 if (next_tx_seq_offset < 0)
2877 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before it. */
2879 if (next_tx_seq_offset > tx_seq_offset) {
2880 __skb_queue_before(&chan->srej_q, next_skb, skb);
2884 if (skb_queue_is_last(&chan->srej_q, next_skb))
2887 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Sorts after everything currently queued: append at the tail. */
2889 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an ERTM SDU from its SAR fragments and deliver it to the
 * socket. Unsegmented frames are queued directly; START allocates the
 * reassembly buffer (chan->sdu), CONTINUE appends, END finishes, clones
 * and queues the SDU. The SAR_RETRY flag lets a failed final delivery be
 * retried after a local-busy episode without re-copying data. Errors fall
 * through to a disconnect. */
2894 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2896 struct sk_buff *_skb;
2899 switch (control & L2CAP_CTRL_SAR) {
2900 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame while mid-reassembly is a protocol violation. */
2901 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2904 err = sock_queue_rcv_skb(chan->sk, skb);
2910 case L2CAP_SDU_START:
2911 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the SDU length. */
2914 chan->sdu_len = get_unaligned_le16(skb->data);
2916 if (chan->sdu_len > chan->imtu)
2919 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2923 /* pull sdu_len bytes only after alloc, because of Local Busy
2924 * condition we have to be sure that this will be executed
2925 * only once, i.e., when alloc does not fail */
2928 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2930 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2931 chan->partial_sdu_len = skb->len;
2934 case L2CAP_SDU_CONTINUE:
2935 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Guard against the fragments exceeding the announced SDU length. */
2941 chan->partial_sdu_len += skb->len;
2942 if (chan->partial_sdu_len > chan->sdu_len)
2945 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END path: finish and deliver (case label elided here). */
2950 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* On a retry the bytes were already appended; skip the checks/copy. */
2956 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2957 chan->partial_sdu_len += skb->len;
2959 if (chan->partial_sdu_len > chan->imtu)
2962 if (chan->partial_sdu_len != chan->sdu_len)
2965 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2968 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2970 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2974 err = sock_queue_rcv_skb(chan->sk, _skb);
2977 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2981 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2982 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2984 kfree_skb(chan->sdu);
/* Error path: drop any partial SDU and reset the link. */
2992 kfree_skb(chan->sdu);
2996 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Try to drain the busy queue into the reassembly path. If a frame still
 * cannot be delivered it is requeued at the head and we stay busy. Once
 * drained, exit local-busy: if an RNR was sent, poll the peer with
 * RR+P-bit and arm the monitor timer; then clear the busy flags. */
3001 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3003 struct sk_buff *skb;
3007 while ((skb = skb_dequeue(&chan->busy_q))) {
3008 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3009 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: put the frame back where it was and bail out. */
3011 skb_queue_head(&chan->busy_q, skb);
3015 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3018 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer "not ready" earlier; poll to restart its sender. */
3021 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3022 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3023 l2cap_send_sframe(chan, control);
3024 chan->retry_count = 1;
3026 del_timer(&chan->retrans_timer);
3027 __mod_monitor_timer();
3029 chan->conn_state |= L2CAP_CONN_WAIT_F;
3032 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3033 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3035 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue item run while the channel is in local-busy: repeatedly try to
 * push queued rx frames, sleeping between attempts, until the queue drains,
 * the retry budget (L2CAP_LOCAL_BUSY_TRIES) is exhausted, a signal arrives,
 * or the socket errors out. */
3040 static void l2cap_busy_work(struct work_struct *work)
3042 DECLARE_WAITQUEUE(wait, current);
3043 struct l2cap_chan *chan =
3044 container_of(work, struct l2cap_chan, busy_work);
3045 struct sock *sk = chan->sk;
3046 int n_tries = 0, timeo = HZ/5, err;
3047 struct sk_buff *skb;
3051 add_wait_queue(sk_sleep(sk), &wait);
3052 while ((skb = skb_peek(&chan->busy_q))) {
3053 set_current_state(TASK_INTERRUPTIBLE);
/* Too many failed attempts: give up and disconnect with EBUSY. */
3055 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3057 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3064 if (signal_pending(current)) {
3065 err = sock_intr_errno(timeo);
/* Sleep before retrying so the consumer can make progress. */
3070 timeo = schedule_timeout(timeo);
3073 err = sock_error(sk);
3077 if (l2cap_try_push_rx_skb(chan) == 0)
3081 set_current_state(TASK_RUNNING);
3082 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver one received ERTM I-frame, entering local busy on overload.
 * If already locally busy, the frame is appended to busy_q and a drain
 * is attempted.  Otherwise the frame is reassembled directly; on the
 * busy condition the frame is parked, an RNR S-frame is sent, the ack
 * timer is stopped, and the busy worker is scheduled on _busy_wq. */
3087 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
/* already busy: just park the frame and try to drain */
3091 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3092 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3093 __skb_queue_tail(&chan->busy_q, skb);
3094 return l2cap_try_push_rx_skb(chan);
3099 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3101 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3105 /* Busy Condition */
3106 BT_DBG("chan %p, Enter local busy", chan);
3108 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3109 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3110 __skb_queue_tail(&chan->busy_q, skb);
/* tell the peer to stop sending: RNR with current buffer_seq */
3112 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3113 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3114 l2cap_send_sframe(chan, sctrl);
3116 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3118 del_timer(&chan->ack_timer);
/* defer further draining to the busy workqueue */
3120 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble SDUs arriving on a streaming-mode channel, keyed on the
 * SAR bits of the frame control field.  Unsegmented frames are queued
 * to the socket directly; START allocates chan->sdu sized from the
 * 16-bit SDU length header, CONTINUE appends, and END appends, clones
 * and delivers the complete SDU.  Oversized or out-of-order fragments
 * cause the partial SDU to be dropped.
 * NOTE(review): elided lines include error paths and break statements;
 * only visible statements are annotated. */
3125 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3127 struct sk_buff *_skb;
3131 * TODO: We have to notify the userland if some data is lost with the
3135 switch (control & L2CAP_CTRL_SAR) {
3136 case L2CAP_SDU_UNSEGMENTED:
/* a stale partial SDU is discarded before delivering this frame */
3137 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3138 kfree_skb(chan->sdu);
3142 err = sock_queue_rcv_skb(chan->sk, skb);
3148 case L2CAP_SDU_START:
3149 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3150 kfree_skb(chan->sdu);
/* first two octets of a START frame carry the total SDU length */
3154 chan->sdu_len = get_unaligned_le16(skb->data);
3157 if (chan->sdu_len > chan->imtu) {
3162 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3168 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3170 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3171 chan->partial_sdu_len = skb->len;
3175 case L2CAP_SDU_CONTINUE:
/* CONTINUE without a preceding START is invalid */
3176 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3179 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3181 chan->partial_sdu_len += skb->len;
/* accumulated more than the announced SDU length: drop it */
3182 if (chan->partial_sdu_len > chan->sdu_len)
3183 kfree_skb(chan->sdu);
3190 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3193 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3195 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3196 chan->partial_sdu_len += skb->len;
3198 if (chan->partial_sdu_len > chan->imtu)
/* complete SDU: clone and hand a copy to the socket layer */
3201 if (chan->partial_sdu_len == chan->sdu_len) {
3202 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3203 err = sock_queue_rcv_skb(chan->sk, _skb);
3210 kfree_skb(chan->sdu);
/* Deliver frames from srej_q that are now in sequence, starting at
 * tx_seq.  Each consecutive frame is dequeued, reassembled, and both
 * buffer_seq_srej and tx_seq advance modulo 64; the loop stops at the
 * first gap (a queued frame whose tx_seq does not match). */
3218 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3220 struct sk_buff *skb;
3223 while ((skb = skb_peek(&chan->srej_q))) {
/* stop at the first out-of-sequence frame */
3224 if (bt_cb(skb)->tx_seq != tx_seq)
3227 skb = skb_dequeue(&chan->srej_q);
3228 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3229 l2cap_ertm_reassembly_sdu(chan, skb, control);
3230 chan->buffer_seq_srej =
3231 (chan->buffer_seq_srej + 1) % 64;
3232 tx_seq = (tx_seq + 1) % 64;
/* Resend SREJ S-frames for every entry in srej_l up to (and removing)
 * the entry matching tx_seq; entries that are resent are moved to the
 * tail of the list to preserve request order. */
3236 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3238 struct srej_list *l, *tmp;
3241 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* the frame we were missing has arrived: drop its list entry */
3242 if (l->tx_seq == tx_seq) {
3247 control = L2CAP_SUPER_SELECT_REJECT;
3248 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3249 l2cap_send_sframe(chan, control);
3251 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * tx_seq, recording each request in srej_l, then advance expected_tx_seq
 * past the frame that actually arrived.
 * NOTE(review): the kzalloc() result is dereferenced without a NULL
 * check — under GFP_ATOMIC this can oops on allocation failure; later
 * upstream kernels check it and disconnect. Confirm against upstream. */
3255 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3257 struct srej_list *new;
3260 while (tx_seq != chan->expected_tx_seq) {
3261 control = L2CAP_SUPER_SELECT_REJECT;
3262 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3263 l2cap_send_sframe(chan, control);
3265 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3266 new->tx_seq = chan->expected_tx_seq;
3267 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3268 list_add_tail(&new->list, &chan->srej_l);
3270 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Process a received ERTM I-frame: handle the F-bit/WAIT_F handshake,
 * ack bookkeeping, out-of-window rejection, SREJ (selective reject)
 * recovery, in-sequence delivery via l2cap_push_rx_skb(), and periodic
 * RR acknowledgements every num_to_ack (= tx_win/6 + 1) frames.
 * NOTE(review): many original lines (gotos, drop paths, returns) are
 * elided in this extract. */
3273 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3275 u8 tx_seq = __get_txseq(rx_control);
3276 u8 req_seq = __get_reqseq(rx_control);
3277 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3278 int tx_seq_offset, expected_tx_seq_offset;
/* ack roughly every sixth frame of the transmit window */
3279 int num_to_ack = (chan->tx_win/6) + 1;
3282 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3283 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer */
3285 if (L2CAP_CTRL_FINAL & rx_control &&
3286 chan->conn_state & L2CAP_CONN_WAIT_F) {
3287 del_timer(&chan->monitor_timer);
3288 if (chan->unacked_frames > 0)
3289 __mod_retrans_timer();
3290 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* piggybacked ReqSeq acknowledges our outstanding frames */
3293 chan->expected_ack_seq = req_seq;
3294 l2cap_drop_acked_frames(chan);
3296 if (tx_seq == chan->expected_tx_seq)
/* offset of this frame from the receive window base, mod 64 */
3299 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3300 if (tx_seq_offset < 0)
3301 tx_seq_offset += 64;
3303 /* invalid tx_seq */
3304 if (tx_seq_offset >= chan->tx_win) {
3305 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against one
 * flag; a bitwise test (& L2CAP_CONN_LOCAL_BUSY) was presumably
 * intended — confirm against upstream history. */
3309 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3312 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3313 struct srej_list *first;
3315 first = list_first_entry(&chan->srej_l,
3316 struct srej_list, list);
/* this is the frame we SREJ'd first: fill the gap */
3317 if (tx_seq == first->tx_seq) {
3318 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3319 l2cap_check_srej_gap(chan, tx_seq);
3321 list_del(&first->list);
/* all requested retransmissions received: leave SREJ_SENT */
3324 if (list_empty(&chan->srej_l)) {
3325 chan->buffer_seq = chan->buffer_seq_srej;
3326 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3327 l2cap_send_ack(chan);
3328 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3331 struct srej_list *l;
3333 /* duplicated tx_seq */
3334 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3337 list_for_each_entry(l, &chan->srej_l, list) {
3338 if (l->tx_seq == tx_seq) {
3339 l2cap_resend_srejframe(chan, tx_seq);
3343 l2cap_send_srejframe(chan, tx_seq);
3346 expected_tx_seq_offset =
3347 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3348 if (expected_tx_seq_offset < 0)
3349 expected_tx_seq_offset += 64;
3351 /* duplicated tx_seq */
3352 if (tx_seq_offset < expected_tx_seq_offset)
/* first out-of-sequence frame: enter SREJ recovery */
3355 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3357 BT_DBG("chan %p, Enter SREJ", chan);
3359 INIT_LIST_HEAD(&chan->srej_l);
3360 chan->buffer_seq_srej = chan->buffer_seq;
3362 __skb_queue_head_init(&chan->srej_q);
3363 __skb_queue_head_init(&chan->busy_q);
3364 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3366 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3368 l2cap_send_srejframe(chan, tx_seq);
3370 del_timer(&chan->ack_timer);
/* in-sequence path: accept the frame */
3375 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3377 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3378 bt_cb(skb)->tx_seq = tx_seq;
3379 bt_cb(skb)->sar = sar;
3380 __skb_queue_tail(&chan->srej_q, skb);
3384 err = l2cap_push_rx_skb(chan, skb, rx_control);
3388 if (rx_control & L2CAP_CTRL_FINAL) {
3389 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3390 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3392 l2cap_retransmit_frames(chan);
/* send an RR ack once enough frames have accumulated */
3397 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3398 if (chan->num_acked == num_to_ack - 1)
3399 l2cap_send_ack(chan);
/* Handle a Receiver Ready (RR) S-frame: acknowledge outstanding frames,
 * then respond to the P-bit (peer poll) or F-bit (final, answer to our
 * poll), possibly retransmitting or resuming transmission. */
3408 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3410 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3413 chan->expected_ack_seq = __get_reqseq(rx_control);
3414 l2cap_drop_acked_frames(chan);
/* peer is polling us: we must answer with the F-bit set */
3416 if (rx_control & L2CAP_CTRL_POLL) {
3417 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3418 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3419 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3420 (chan->unacked_frames > 0))
3421 __mod_retrans_timer();
3423 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3424 l2cap_send_srejtail(chan);
3426 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit: answer to our poll */
3429 } else if (rx_control & L2CAP_CTRL_FINAL) {
3430 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3432 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3433 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3435 l2cap_retransmit_frames(chan);
3438 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3439 (chan->unacked_frames > 0))
3440 __mod_retrans_timer();
3442 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3443 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3444 l2cap_send_ack(chan);
3446 l2cap_ertm_send(chan);
/* Handle a Reject (REJ) S-frame: ack frames up to ReqSeq and
 * retransmit from there; the REJ_ACT flag tracks whether a REJ-driven
 * retransmission is pending while we await our poll's F-bit. */
3450 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3452 u8 tx_seq = __get_reqseq(rx_control);
3454 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3456 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3458 chan->expected_ack_seq = tx_seq;
3459 l2cap_drop_acked_frames(chan);
3461 if (rx_control & L2CAP_CTRL_FINAL) {
/* already acted on a REJ for this window: do not retransmit twice */
3462 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3463 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3465 l2cap_retransmit_frames(chan);
3467 l2cap_retransmit_frames(chan);
/* remember the pending REJ while waiting for our poll answer */
3469 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3470 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame.  The P-bit variant also acks and demands an F-bit
 * reply; the F-bit variant clears a matching saved SREJ; otherwise the
 * request is remembered in srej_save_reqseq while WAIT_F is set. */
3473 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3475 u8 tx_seq = __get_reqseq(rx_control);
3477 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3479 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3481 if (rx_control & L2CAP_CTRL_POLL) {
3482 chan->expected_ack_seq = tx_seq;
3483 l2cap_drop_acked_frames(chan);
/* our reply must carry the F-bit */
3485 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3486 l2cap_retransmit_one_frame(chan, tx_seq);
3488 l2cap_ertm_send(chan);
3490 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3491 chan->srej_save_reqseq = tx_seq;
3492 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3494 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit for the SREJ we already acted on: just clear the flag */
3495 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3496 chan->srej_save_reqseq == tx_seq)
3497 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3499 l2cap_retransmit_one_frame(chan, tx_seq);
3501 l2cap_retransmit_one_frame(chan, tx_seq);
3502 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3503 chan->srej_save_reqseq = tx_seq;
3504 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the remote busy, ack
 * frames up to ReqSeq, and stop retransmitting.  A P-bit demands an
 * F-bit reply (RR/RNR, or SREJ tail when in SREJ recovery). */
3509 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3511 u8 tx_seq = __get_reqseq(rx_control);
3513 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3515 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3516 chan->expected_ack_seq = tx_seq;
3517 l2cap_drop_acked_frames(chan);
3519 if (rx_control & L2CAP_CTRL_POLL)
3520 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
/* not in SREJ recovery: just stop retransmissions and maybe reply */
3522 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3523 del_timer(&chan->retrans_timer);
3524 if (rx_control & L2CAP_CTRL_POLL)
3525 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3529 if (rx_control & L2CAP_CTRL_POLL)
3530 l2cap_send_srejtail(chan);
3532 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after
 * servicing the F-bit/WAIT_F handshake (stop the monitor timer and
 * re-arm retransmission if frames are still unacked). */
3535 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3537 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3539 if (L2CAP_CTRL_FINAL & rx_control &&
3540 chan->conn_state & L2CAP_CONN_WAIT_F) {
3541 del_timer(&chan->monitor_timer);
3542 if (chan->unacked_frames > 0)
3543 __mod_retrans_timer();
3544 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* dispatch on the supervisory function bits */
3547 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3548 case L2CAP_SUPER_RCV_READY:
3549 l2cap_data_channel_rrframe(chan, rx_control);
3552 case L2CAP_SUPER_REJECT:
3553 l2cap_data_channel_rejframe(chan, rx_control);
3556 case L2CAP_SUPER_SELECT_REJECT:
3557 l2cap_data_channel_srejframe(chan, rx_control);
3560 case L2CAP_SUPER_RCV_NOT_READY:
3561 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM frame on a socket: validate FCS, payload
 * length against MPS, and ReqSeq (must not acknowledge beyond
 * next_tx_seq, computed as mod-64 offsets), then route to the I-frame
 * or S-frame handler.  Invalid frames trigger a disconnect request.
 * NOTE(review): drop paths and length computations are partly elided
 * in this extract. */
3569 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3571 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3574 int len, next_tx_seq_offset, req_seq_offset;
3576 control = get_unaligned_le16(skb->data);
3581 * We can just drop the corrupted I-frame here.
3582 * Receiver will miss it and start proper recovery
3583 * procedures and ask retransmission.
3585 if (l2cap_check_fcs(chan, skb))
/* a SAR-start I-frame carries an extra 2-byte SDU length header */
3588 if (__is_sar_start(control) && __is_iframe(control))
3591 if (chan->fcs == L2CAP_FCS_CRC16)
3594 if (len > chan->mps) {
3595 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3599 req_seq = __get_reqseq(control);
3600 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3601 if (req_seq_offset < 0)
3602 req_seq_offset += 64;
3604 next_tx_seq_offset =
3605 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3606 if (next_tx_seq_offset < 0)
3607 next_tx_seq_offset += 64;
3609 /* check for invalid req-seq */
3610 if (req_seq_offset > next_tx_seq_offset) {
3611 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3615 if (__is_iframe(control)) {
3617 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3621 l2cap_data_channel_iframe(chan, control, skb);
3625 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3629 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an incoming data frame to the channel identified by scid,
 * dispatching on the channel mode: basic (direct socket queue with MTU
 * check), ERTM (l2cap_ertm_data_rcv, or socket backlog if the socket is
 * owned by user context), or streaming (FCS check plus best-effort
 * tx_seq tracking, then streaming reassembly). */
3639 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3641 struct l2cap_chan *chan;
3643 struct l2cap_pinfo *pi;
3648 chan = l2cap_get_chan_by_scid(conn, cid);
3650 BT_DBG("unknown cid 0x%4.4x", cid);
3657 BT_DBG("chan %p, len %d", chan, skb->len);
3659 if (sk->sk_state != BT_CONNECTED)
3662 switch (chan->mode) {
3663 case L2CAP_MODE_BASIC:
3664 /* If socket recv buffers overflows we drop data here
3665 * which is *bad* because L2CAP has to be reliable.
3666 * But we don't have any other choice. L2CAP doesn't
3667 * provide flow control mechanism. */
3669 if (chan->imtu < skb->len)
3672 if (!sock_queue_rcv_skb(sk, skb))
3676 case L2CAP_MODE_ERTM:
/* process in softirq unless the socket is locked by user context */
3677 if (!sock_owned_by_user(sk)) {
3678 l2cap_ertm_data_rcv(sk, skb);
3680 if (sk_add_backlog(sk, skb))
3686 case L2CAP_MODE_STREAMING:
3687 control = get_unaligned_le16(skb->data);
3691 if (l2cap_check_fcs(chan, skb))
3694 if (__is_sar_start(control))
3697 if (chan->fcs == L2CAP_FCS_CRC16)
/* S-frames are not valid in streaming mode */
3700 if (len > chan->mps || len < 0 || __is_sframe(control))
3703 tx_seq = __get_txseq(control);
/* streaming mode tolerates losses: resync expected_tx_seq on a gap */
3705 if (chan->expected_tx_seq == tx_seq)
3706 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3708 chan->expected_tx_seq = (tx_seq + 1) % 64;
3710 l2cap_streaming_reassembly_sdu(chan, skb, control);
3715 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless-channel frame to a socket bound to the given
 * PSM (wildcard cid 0), subject to socket state and MTU checks. */
3729 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3733 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3739 BT_DBG("sk %p, len %d", sk, skb->len);
3741 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3744 if (l2cap_pi(sk)->chan->imtu < skb->len)
3747 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver an LE ATT fixed-channel frame to a socket bound to the given
 * cid, subject to socket state and MTU checks. */
3759 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3763 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3769 BT_DBG("sk %p, len %d", sk, skb->len);
3771 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3774 if (l2cap_pi(sk)->chan->imtu < skb->len)
3777 if (!sock_queue_rcv_skb(sk, skb))
/* Demultiplex a complete L2CAP frame by destination CID after stripping
 * and validating the basic header: signaling, connectionless (PSM
 * prefixed), LE ATT, or a connection-oriented data channel. */
3789 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3791 struct l2cap_hdr *lh = (void *) skb->data;
3795 skb_pull(skb, L2CAP_HDR_SIZE);
3796 cid = __le16_to_cpu(lh->cid);
3797 len = __le16_to_cpu(lh->len);
/* header length must match the remaining payload exactly */
3799 if (len != skb->len) {
3804 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3807 case L2CAP_CID_LE_SIGNALING:
3808 case L2CAP_CID_SIGNALING:
3809 l2cap_sig_channel(conn, skb);
3812 case L2CAP_CID_CONN_LESS:
/* connectionless frames carry the PSM in the first two octets */
3813 psm = get_unaligned_le16(skb->data);
3815 l2cap_conless_channel(conn, psm, skb);
3818 case L2CAP_CID_LE_DATA:
3819 l2cap_att_channel(conn, cid, skb);
3823 l2cap_data_channel(conn, cid, skb);
3828 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening L2CAP sockets.  Sockets bound to the adapter's
 * address take precedence (lm1/exact) over wildcard-bound ones (lm2);
 * the role_switch channel flag adds HCI_LM_MASTER. */
3830 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3832 int exact = 0, lm1 = 0, lm2 = 0;
3833 register struct sock *sk;
3834 struct hlist_node *node;
3836 if (type != ACL_LINK)
3839 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3841 /* Find listening sockets and check their link_mode */
3842 read_lock(&l2cap_sk_list.lock);
3843 sk_for_each(sk, node, &l2cap_sk_list.head) {
3844 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3846 if (sk->sk_state != BT_LISTEN)
/* socket bound to this adapter's own address: exact match */
3849 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3850 lm1 |= HCI_LM_ACCEPT;
3851 if (chan->role_switch)
3852 lm1 |= HCI_LM_MASTER;
/* socket bound to the wildcard address */
3854 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3855 lm2 |= HCI_LM_ACCEPT;
3856 if (chan->role_switch)
3857 lm2 |= HCI_LM_MASTER;
3860 read_unlock(&l2cap_sk_list.lock);
3862 return exact ? lm1 : lm2;
/* HCI callback: on link establishment result, attach an L2CAP
 * connection to the hcon (success) or tear it down (failure). */
3865 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3867 struct l2cap_conn *conn;
3869 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3871 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3875 conn = l2cap_conn_add(hcon, status);
3877 l2cap_conn_ready(conn);
3879 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the reason L2CAP wants for an upcoming
 * disconnect of this ACL link (stored in conn->disc_reason). */
3884 static int l2cap_disconn_ind(struct hci_conn *hcon)
3886 struct l2cap_conn *conn = hcon->l2cap_data;
3888 BT_DBG("hcon %p", hcon);
3890 if (hcon->type != ACL_LINK || !conn)
3893 return conn->disc_reason;
/* HCI callback: the link went down — destroy the L2CAP connection and
 * propagate the mapped error to its channels. */
3896 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3898 BT_DBG("hcon %p reason %d", hcon, reason);
3900 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3903 l2cap_conn_del(hcon, bt_err(reason));
3908 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3910 struct sock *sk = chan->sk;
3912 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3915 if (encrypt == 0x00) {
3916 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3917 l2cap_sock_clear_timer(sk);
3918 l2cap_sock_set_timer(sk, HZ * 5);
3919 } else if (chan->sec_level == BT_SECURITY_HIGH)
3920 __l2cap_sock_close(sk, ECONNREFUSED);
3922 if (chan->sec_level == BT_SECURITY_MEDIUM)
3923 l2cap_sock_clear_timer(sk);
/* HCI callback after an authentication/encryption procedure: walk every
 * channel on the connection and advance its state machine — connected
 * channels get an encryption check, BT_CONNECT channels proceed to send
 * the connect request, and BT_CONNECT2 channels answer the pending
 * connect request with success or a security block. */
3927 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3929 struct l2cap_conn *conn = hcon->l2cap_data;
3930 struct l2cap_chan *chan;
3935 BT_DBG("conn %p", conn);
3937 read_lock(&conn->chan_lock);
3939 list_for_each_entry(chan, &conn->chan_l, list) {
3940 struct sock *sk = chan->sk;
/* channel still mid-connect at the HCI level: skip for now */
3944 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
3949 if (!status && (sk->sk_state == BT_CONNECTED ||
3950 sk->sk_state == BT_CONFIG)) {
3951 l2cap_check_encryption(chan, encrypt);
/* security done for an outgoing channel: send the connect req */
3956 if (sk->sk_state == BT_CONNECT) {
3958 struct l2cap_conn_req req;
3959 req.scid = cpu_to_le16(chan->scid);
3960 req.psm = chan->psm;
3962 chan->ident = l2cap_get_ident(conn);
3963 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
3965 l2cap_send_cmd(conn, chan->ident,
3966 L2CAP_CONN_REQ, sizeof(req), &req);
3968 l2cap_sock_clear_timer(sk);
3969 l2cap_sock_set_timer(sk, HZ / 10);
/* incoming channel waiting on security: answer the request */
3971 } else if (sk->sk_state == BT_CONNECT2) {
3972 struct l2cap_conn_rsp rsp;
3976 sk->sk_state = BT_CONFIG;
3977 result = L2CAP_CR_SUCCESS;
3979 sk->sk_state = BT_DISCONN;
3980 l2cap_sock_set_timer(sk, HZ / 10);
3981 result = L2CAP_CR_SEC_BLOCK;
3984 rsp.scid = cpu_to_le16(chan->dcid);
3985 rsp.dcid = cpu_to_le16(chan->scid);
3986 rsp.result = cpu_to_le16(result);
3987 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3988 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3995 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT) must carry the basic header, which
 * gives the total frame length; short-of-total starts allocate
 * conn->rx_skb and set rx_len.  Continuation fragments append until
 * rx_len hits zero, then the whole frame goes to l2cap_recv_frame().
 * Any inconsistency marks the connection unreliable (ECOMM).
 * NOTE(review): some drop/free paths are elided in this extract. */
4000 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4002 struct l2cap_conn *conn = hcon->l2cap_data;
4005 conn = l2cap_conn_add(hcon, 0);
4010 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4012 if (!(flags & ACL_CONT)) {
4013 struct l2cap_hdr *hdr;
4014 struct l2cap_chan *chan;
/* a start fragment while reassembly is pending: drop stale state */
4019 BT_ERR("Unexpected start frame (len %d)", skb->len);
4020 kfree_skb(conn->rx_skb);
4021 conn->rx_skb = NULL;
4023 l2cap_conn_unreliable(conn, ECOMM);
4026 /* Start fragment always begin with Basic L2CAP header */
4027 if (skb->len < L2CAP_HDR_SIZE) {
4028 BT_ERR("Frame is too short (len %d)", skb->len);
4029 l2cap_conn_unreliable(conn, ECOMM);
4033 hdr = (struct l2cap_hdr *) skb->data;
4034 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4035 cid = __le16_to_cpu(hdr->cid);
4037 if (len == skb->len) {
4038 /* Complete frame received */
4039 l2cap_recv_frame(conn, skb);
4043 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4045 if (skb->len > len) {
4046 BT_ERR("Frame is too long (len %d, expected len %d)",
4048 l2cap_conn_unreliable(conn, ECOMM);
/* pre-validate the destination channel's MTU before buffering */
4052 chan = l2cap_get_chan_by_scid(conn, cid);
4054 if (chan && chan->sk) {
4055 struct sock *sk = chan->sk;
4057 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4058 BT_ERR("Frame exceeding recv MTU (len %d, "
4062 l2cap_conn_unreliable(conn, ECOMM);
4068 /* Allocate skb for the complete frame (with header) */
4069 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4073 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4075 conn->rx_len = len - skb->len;
4077 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with no reassembly in progress */
4079 if (!conn->rx_len) {
4080 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4081 l2cap_conn_unreliable(conn, ECOMM);
4085 if (skb->len > conn->rx_len) {
4086 BT_ERR("Fragment is too long (len %d, expected %d)",
4087 skb->len, conn->rx_len);
4088 kfree_skb(conn->rx_skb);
4089 conn->rx_skb = NULL;
4091 l2cap_conn_unreliable(conn, ECOMM);
4095 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4097 conn->rx_len -= skb->len;
4099 if (!conn->rx_len) {
4100 /* Complete frame received */
4101 l2cap_recv_frame(conn, conn->rx_skb);
4102 conn->rx_skb = NULL;
/* seq_file show handler: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs, security level) under l2cap_sk_list.lock. */
4111 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4114 struct hlist_node *node;
4116 read_lock_bh(&l2cap_sk_list.lock);
4118 sk_for_each(sk, node, &l2cap_sk_list.head) {
4119 struct l2cap_pinfo *pi = l2cap_pi(sk);
4120 struct l2cap_chan *chan = pi->chan;
4122 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4123 batostr(&bt_sk(sk)->src),
4124 batostr(&bt_sk(sk)->dst),
4125 sk->sk_state, __le16_to_cpu(chan->psm),
4126 chan->scid, chan->dcid,
4127 chan->imtu, chan->omtu, chan->sec_level,
4131 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show handler. */
4136 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4138 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
4141 static const struct file_operations l2cap_debugfs_fops = {
4142 .open = l2cap_debugfs_open,
4144 .llseek = seq_lseek,
4145 .release = single_release,
/* Dentry for the L2CAP debugfs file, created in l2cap_init(). */
4148 static struct dentry *l2cap_debugfs;
/* HCI protocol descriptor registering L2CAP's link-layer callbacks. */
4150 static struct hci_proto l2cap_hci_proto = {
4152 .id = HCI_PROTO_L2CAP,
4153 .connect_ind = l2cap_connect_ind,
4154 .connect_cfm = l2cap_connect_cfm,
4155 .disconn_ind = l2cap_disconn_ind,
4156 .disconn_cfm = l2cap_disconn_cfm,
4157 .security_cfm = l2cap_security_cfm,
4158 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, create the single-threaded
 * local-busy workqueue, register with HCI, and expose the debugfs
 * entry; unwinds sockets/workqueue on failure. */
4161 int __init l2cap_init(void)
4165 err = l2cap_init_sockets();
/* ordered workqueue used by l2cap_busy_work */
4169 _busy_wq = create_singlethread_workqueue("l2cap");
4175 err = hci_register_proto(&l2cap_hci_proto);
4177 BT_ERR("L2CAP protocol registration failed");
4178 bt_sock_unregister(BTPROTO_L2CAP);
4183 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4184 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs failure is non-fatal: only log it */
4186 BT_ERR("Failed to create L2CAP debug file");
/* error unwind path */
4192 destroy_workqueue(_busy_wq);
4193 l2cap_cleanup_sockets();
/* Module exit: remove debugfs, flush and destroy the busy workqueue,
 * unregister from HCI, and tear down the socket family. */
4197 void l2cap_exit(void)
4199 debugfs_remove(l2cap_debugfs);
/* let pending busy work finish before destroying the queue */
4201 flush_workqueue(_busy_wq);
4202 destroy_workqueue(_busy_wq);
4204 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4205 BT_ERR("L2CAP protocol unregistration failed");
4207 l2cap_cleanup_sockets();
/* Runtime-writable module parameter to disable ERTM globally. */
4210 module_param(disable_ertm, bool, 0644);
4211 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");