2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module-wide L2CAP feature mask; fixed-channel support by default
 * (ERTM/streaming bits presumably OR'd in elsewhere — confirm). */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of fixed channels advertised to peers; 0x02 in octet 0. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue backing the deferred ERTM local-busy handling. */
63 static struct workqueue_struct *_busy_wq;
/* Global list of L2CAP sockets, protected by its own rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 struct l2cap_chan *c;
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
113 read_unlock(&conn->chan_lock);
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119 struct l2cap_chan *c;
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
136 read_unlock(&conn->chan_lock);
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
142 u16 cid = L2CAP_CID_DYN_START;
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
152 struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
154 struct l2cap_chan *chan;
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
/* Attach 'chan' to 'conn': bind the channel's socket to the
 * connection, assign source/destination CIDs by socket type, and link
 * the channel into conn->chan_l.  Callers serialise via
 * conn->chan_lock (see l2cap_chan_add). */
165 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
167 struct sock *sk = chan->sk;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
170 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason until signalling says otherwise —
 * NOTE(review): a named constant would be clearer. */
172 conn->disc_reason = 0x13;
174 l2cap_pi(sk)->conn = conn;
/* Connection-oriented sockets: LE data channels use the fixed LE CID,
 * BR/EDR channels get a dynamic CID. */
176 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
177 if (conn->hcon->type == LE_LINK) {
179 chan->omtu = L2CAP_LE_DEFAULT_MTU;
180 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
181 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
185 chan->omtu = L2CAP_DEFAULT_MTU;
187 } else if (sk->sk_type == SOCK_DGRAM) {
188 /* Connectionless socket */
189 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
191 chan->omtu = L2CAP_DEFAULT_MTU;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
196 chan->omtu = L2CAP_DEFAULT_MTU;
/* Finally make the channel visible on the connection's list. */
201 list_add(&chan->list, &conn->chan_l);
205 * Must be called on the locked socket. */
/* Detach 'chan' from its connection and close its socket: unlink from
 * conn->chan_l, drop the hci_conn reference, notify the owner (or the
 * accept parent), then flush ERTM state if configuration finished. */
206 void l2cap_chan_del(struct l2cap_chan *chan, int err)
208 struct sock *sk = chan->sk;
209 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
210 struct sock *parent = bt_sk(sk)->parent;
212 l2cap_sock_clear_timer(sk);
214 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
217 /* Delete from channel list */
218 write_lock_bh(&conn->chan_lock);
219 list_del(&chan->list);
220 write_unlock_bh(&conn->chan_lock);
223 l2cap_pi(sk)->conn = NULL;
/* Release the reference taken when the channel was attached. */
224 hci_conn_put(conn->hcon);
227 sk->sk_state = BT_CLOSED;
228 sock_set_flag(sk, SOCK_ZAPPED);
/* Unaccepted child: take it off the parent's accept queue and wake
 * the parent; otherwise just signal the state change. */
234 bt_accept_unlink(sk);
235 parent->sk_data_ready(parent, 0);
237 sk->sk_state_change(sk);
/* Nothing more to free unless configuration completed in both
 * directions (tx/ERTM machinery was never armed otherwise). */
239 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
240 chan->conf_state & L2CAP_CONF_INPUT_DONE))
243 skb_queue_purge(&chan->tx_q);
/* ERTM teardown: stop all timers and drain reassembly queues. */
245 if (chan->mode == L2CAP_MODE_ERTM) {
246 struct srej_list *l, *tmp;
248 del_timer(&chan->retrans_timer);
249 del_timer(&chan->monitor_timer);
250 del_timer(&chan->ack_timer);
252 skb_queue_purge(&chan->srej_q);
253 skb_queue_purge(&chan->busy_q);
255 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Map the channel's security level onto an HCI authentication
 * requirement, special-casing raw sockets and the SDP PSM. */
265 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
267 struct sock *sk = chan->sk;
/* Raw sockets request dedicated-bonding auth levels. */
269 if (sk->sk_type == SOCK_RAW) {
270 switch (chan->sec_level) {
271 case BT_SECURITY_HIGH:
272 return HCI_AT_DEDICATED_BONDING_MITM;
273 case BT_SECURITY_MEDIUM:
274 return HCI_AT_DEDICATED_BONDING;
276 return HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP): low security is promoted to the SDP level and no
 * bonding is requested. */
278 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
279 if (chan->sec_level == BT_SECURITY_LOW)
280 chan->sec_level = BT_SECURITY_SDP;
282 if (chan->sec_level == BT_SECURITY_HIGH)
283 return HCI_AT_NO_BONDING_MITM;
285 return HCI_AT_NO_BONDING;
/* Everything else: general bonding scaled by security level. */
287 switch (chan->sec_level) {
288 case BT_SECURITY_HIGH:
289 return HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 return HCI_AT_GENERAL_BONDING;
293 return HCI_AT_NO_BONDING;
298 /* Service level security */
299 static inline int l2cap_check_security(struct l2cap_chan *chan)
301 struct l2cap_conn *conn = l2cap_pi(chan->sk)->conn;
304 auth_type = l2cap_get_auth_type(chan);
306 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
309 u8 l2cap_get_ident(struct l2cap_conn *conn)
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn->lock);
321 if (++conn->tx_ident > 128)
326 spin_unlock_bh(&conn->lock);
331 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
336 BT_DBG("code 0x%2.2x", code);
341 if (lmp_no_flush_capable(conn->hcon->hdev))
342 flags = ACL_START_NO_FLUSH;
346 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit one ERTM S-frame carrying 'control'.  Pending
 * F/P bits on the channel are folded into the control field, an FCS
 * is appended when CRC16 is configured, and the frame is handed
 * directly to HCI (S-frames are never queued for retransmission). */
349 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
352 struct l2cap_hdr *lh;
353 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
354 struct l2cap_conn *conn = pi->conn;
355 struct sock *sk = (struct sock *)pi;
/* Base length: L2CAP header + 16-bit control field. */
356 int count, hlen = L2CAP_HDR_SIZE + 2;
359 if (sk->sk_state != BT_CONNECTED)
362 if (chan->fcs == L2CAP_FCS_CRC16)
365 BT_DBG("chan %p, control 0x%2.2x", chan, control);
367 count = min_t(unsigned int, conn->mtu, hlen);
368 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending Final bit, if any. */
370 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
371 control |= L2CAP_CTRL_FINAL;
372 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Consume a pending Poll bit, if any. */
375 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
376 control |= L2CAP_CTRL_POLL;
377 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
380 skb = bt_skb_alloc(count, GFP_ATOMIC);
384 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
385 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
386 lh->cid = cpu_to_le16(pi->dcid);
387 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything built so far (header + control). */
389 if (chan->fcs == L2CAP_FCS_CRC16) {
390 u16 fcs = crc16(0, (u8 *)lh, count - 2);
391 put_unaligned_le16(fcs, skb_put(skb, 2));
394 if (lmp_no_flush_capable(conn->hcon->hdev))
395 flags = ACL_START_NO_FLUSH;
399 hci_send_acl(pi->conn->hcon, skb, flags);
402 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
404 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
405 control |= L2CAP_SUPER_RCV_NOT_READY;
406 chan->conn_state |= L2CAP_CONN_RNR_SENT;
408 control |= L2CAP_SUPER_RCV_READY;
410 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
412 l2cap_send_sframe(chan, control);
415 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
417 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: once the remote feature mask is
 * known (and security clears), send a Connection Request; otherwise
 * first issue an Information Request for the feature mask. */
420 static void l2cap_do_start(struct l2cap_chan *chan)
422 struct sock *sk = chan->sk;
423 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
425 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange started but not finished: wait for the reply. */
426 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
429 if (l2cap_check_security(chan) &&
430 __l2cap_no_conn_pending(chan)) {
431 struct l2cap_conn_req req;
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 req.psm = l2cap_pi(sk)->psm;
/* Remember the ident so the response can be matched to us. */
435 chan->ident = l2cap_get_ident(conn);
436 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
438 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* No feature exchange yet: start one and arm its timeout. */
442 struct l2cap_info_req req;
443 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
445 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
446 conn->info_ident = l2cap_get_ident(conn);
448 mod_timer(&conn->info_timer, jiffies +
449 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
451 l2cap_send_cmd(conn, conn->info_ident,
452 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether 'mode' is usable given both the remote feature mask
 * and our local one; only ERTM and streaming need feature bits. */
456 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
458 u32 local_feat_mask = l2cap_feat_mask;
/* Locally we also support ERTM and streaming (unless disabled). */
460 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
463 case L2CAP_MODE_ERTM:
464 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
465 case L2CAP_MODE_STREAMING:
466 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnection Request for 'chan' and move its socket to
 * BT_DISCONN.  ERTM timers are stopped first so nothing fires while
 * the channel is being torn down. */
472 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
475 struct l2cap_disconn_req req;
482 if (chan->mode == L2CAP_MODE_ERTM) {
483 del_timer(&chan->retrans_timer);
484 del_timer(&chan->monitor_timer);
485 del_timer(&chan->ack_timer);
488 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
489 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
490 l2cap_send_cmd(conn, l2cap_get_ident(conn),
491 L2CAP_DISCONN_REQ, sizeof(req), &req);
493 sk->sk_state = BT_DISCONN;
497 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its signalling
 * state machine: send pending Connection Requests (BT_CONNECT) or
 * Connection Responses (BT_CONNECT2) now that the feature-mask
 * exchange and/or security procedures have completed. */
498 static void l2cap_conn_start(struct l2cap_conn *conn)
500 struct l2cap_chan *chan, *tmp;
502 BT_DBG("conn %p", conn);
504 read_lock(&conn->chan_lock);
506 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
507 struct sock *sk = chan->sk;
/* Only connection-oriented sockets take part in signalling. */
511 if (sk->sk_type != SOCK_SEQPACKET &&
512 sk->sk_type != SOCK_STREAM) {
517 if (sk->sk_state == BT_CONNECT) {
518 struct l2cap_conn_req req;
520 if (!l2cap_check_security(chan) ||
521 !__l2cap_no_conn_pending(chan)) {
/* Required mode unsupported by the peer: abort the channel. */
526 if (!l2cap_mode_supported(chan->mode,
528 && chan->conf_state &
529 L2CAP_CONF_STATE2_DEVICE) {
530 /* __l2cap_sock_close() calls list_del(chan)
531 * so release the lock */
532 read_unlock_bh(&conn->chan_lock);
533 __l2cap_sock_close(sk, ECONNRESET);
534 read_lock_bh(&conn->chan_lock);
539 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
540 req.psm = l2cap_pi(sk)->psm;
542 chan->ident = l2cap_get_ident(conn);
543 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
545 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
548 } else if (sk->sk_state == BT_CONNECT2) {
549 struct l2cap_conn_rsp rsp;
/* Note swapped CIDs: our scid is the peer's dcid. */
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554 if (l2cap_check_security(chan)) {
/* Deferred setup: keep the peer pending and wake the listener. */
555 if (bt_sk(sk)->defer_setup) {
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security still running: report authentication pending. */
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Success path: follow up with our Configuration Request once. */
574 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
575 rsp.result != L2CAP_CR_SUCCESS) {
580 chan->conf_state |= L2CAP_CONF_REQ_SENT;
581 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
582 l2cap_build_conf_req(chan, buf), buf);
583 chan->num_conf_req++;
589 read_unlock(&conn->chan_lock);
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
/* 'sk1' remembers a BDADDR_ANY (wildcard) listener as a fallback when
 * no exact source-address match exists. */
595 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
597 struct sock *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
600 read_lock(&l2cap_sk_list.lock);
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
606 if (l2cap_pi(sk)->scid == cid) {
/* Exact source-address match wins immediately. */
608 if (!bacmp(&bt_sk(sk)->src, src))
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
617 read_unlock(&l2cap_sk_list.lock);
/* 'node' non-NULL means the loop broke on an exact match. */
619 return node ? sk : sk1;
/* Incoming LE connection: clone a child socket off the LE-data
 * listener, allocate its channel, attach it to 'conn' and signal the
 * parent that a connection is ready to be accepted. */
622 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
624 struct sock *parent, *sk;
625 struct l2cap_chan *chan;
629 /* Check if we have socket listening on cid */
630 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
635 bh_lock_sock(parent);
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
647 chan = l2cap_chan_alloc(sk);
653 l2cap_pi(sk)->chan = chan;
655 write_lock_bh(&conn->chan_lock);
/* Keep the underlying HCI link alive while the channel exists. */
657 hci_conn_hold(conn->hcon);
659 l2cap_sock_init(sk, parent);
661 bacpy(&bt_sk(sk)->src, conn->src);
662 bacpy(&bt_sk(sk)->dst, conn->dst);
664 bt_accept_enqueue(parent, sk);
666 __l2cap_chan_add(conn, chan);
668 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* LE data channels need no configuration: connected right away. */
670 sk->sk_state = BT_CONNECTED;
671 parent->sk_data_ready(parent, 0);
673 write_unlock_bh(&conn->chan_lock);
676 bh_unlock_sock(parent);
/* The ACL/LE link came up: for each channel, either mark it connected
 * immediately (LE channels and non-connection-oriented sockets) or
 * start the L2CAP connect procedure. */
679 static void l2cap_conn_ready(struct l2cap_conn *conn)
681 struct l2cap_chan *chan;
683 BT_DBG("conn %p", conn);
/* Incoming LE link: spawn the accepted child first. */
685 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
686 l2cap_le_conn_ready(conn);
688 read_lock(&conn->chan_lock);
690 list_for_each_entry(chan, &conn->chan_l, list) {
691 struct sock *sk = chan->sk;
695 if (conn->hcon->type == LE_LINK) {
696 l2cap_sock_clear_timer(sk);
697 sk->sk_state = BT_CONNECTED;
698 sk->sk_state_change(sk);
/* Raw/connectionless sockets have no signalling handshake. */
701 if (sk->sk_type != SOCK_SEQPACKET &&
702 sk->sk_type != SOCK_STREAM) {
703 l2cap_sock_clear_timer(sk);
704 sk->sk_state = BT_CONNECTED;
705 sk->sk_state_change(sk);
706 } else if (sk->sk_state == BT_CONNECT)
707 l2cap_do_start(chan);
712 read_unlock(&conn->chan_lock);
715 /* Notify sockets that we cannot guaranty reliability anymore */
716 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
718 struct l2cap_chan *chan;
720 BT_DBG("conn %p", conn);
722 read_lock(&conn->chan_lock);
724 list_for_each_entry(chan, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
727 if (chan->force_reliable)
731 read_unlock(&conn->chan_lock);
734 static void l2cap_info_timeout(unsigned long arg)
736 struct l2cap_conn *conn = (void *) arg;
738 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
739 conn->info_ident = 0;
741 l2cap_conn_start(conn);
/* Allocate and initialise the l2cap_conn bound to an HCI connection
 * (or return the existing one).  The MTU comes from the link type;
 * the info timer is only used on BR/EDR links. */
744 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
746 struct l2cap_conn *conn = hcon->l2cap_data;
751 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
755 hcon->l2cap_data = conn;
758 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links may advertise their own (smaller) MTU. */
760 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
761 conn->mtu = hcon->hdev->le_mtu;
763 conn->mtu = hcon->hdev->acl_mtu;
765 conn->src = &hcon->hdev->bdaddr;
766 conn->dst = &hcon->dst;
770 spin_lock_init(&conn->lock);
771 rwlock_init(&conn->chan_lock);
773 INIT_LIST_HEAD(&conn->chan_l);
/* LE has no information request procedure, so no info timer. */
775 if (hcon->type != LE_LINK)
776 setup_timer(&conn->info_timer, l2cap_info_timeout,
777 (unsigned long) conn);
/* 0x13: default disconnect reason — NOTE(review): magic number. */
779 conn->disc_reason = 0x13;
/* Tear down an l2cap_conn: free any partially reassembled frame,
 * close every channel with 'err', stop the info timer and detach
 * from the hci_conn. */
784 static void l2cap_conn_del(struct hci_conn *hcon, int err)
786 struct l2cap_conn *conn = hcon->l2cap_data;
787 struct l2cap_chan *chan, *l;
793 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any rx fragment still being reassembled. */
795 kfree_skb(conn->rx_skb);
/* _safe: l2cap_chan_del() unlinks the entry we're standing on. */
798 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
801 l2cap_chan_del(chan, err);
/* Only armed when an info request was actually sent. */
806 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
807 del_timer_sync(&conn->info_timer);
809 hcon->l2cap_data = NULL;
813 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
815 write_lock_bh(&conn->chan_lock);
816 __l2cap_chan_add(conn, chan);
817 write_unlock_bh(&conn->chan_lock);
820 /* ---- Socket interface ---- */
822 /* Find socket with psm and source bdaddr.
823 * Returns closest match.
/* Same fallback scheme as l2cap_get_sock_by_scid: 'sk1' holds a
 * BDADDR_ANY listener used when no exact source match exists. */
825 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
827 struct sock *sk = NULL, *sk1 = NULL;
828 struct hlist_node *node;
830 read_lock(&l2cap_sk_list.lock);
832 sk_for_each(sk, node, &l2cap_sk_list.head) {
833 if (state && sk->sk_state != state)
836 if (l2cap_pi(sk)->psm == psm) {
838 if (!bacmp(&bt_sk(sk)->src, src))
842 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
847 read_unlock(&l2cap_sk_list.lock);
/* 'node' non-NULL means the loop broke on an exact match. */
849 return node ? sk : sk1;
/* Initiate an outgoing connection for 'chan': resolve the HCI route,
 * create (or reuse) the ACL or LE link, attach the channel, and
 * either finish immediately if the link is already up or wait for
 * l2cap_conn_ready(). */
852 int l2cap_chan_connect(struct l2cap_chan *chan)
854 struct sock *sk = chan->sk;
855 bdaddr_t *src = &bt_sk(sk)->src;
856 bdaddr_t *dst = &bt_sk(sk)->dst;
857 struct l2cap_conn *conn;
858 struct hci_conn *hcon;
859 struct hci_dev *hdev;
863 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
866 hdev = hci_get_route(dst, src);
868 return -EHOSTUNREACH;
870 hci_dev_lock_bh(hdev);
872 auth_type = l2cap_get_auth_type(chan);
/* The LE data CID selects an LE link; everything else rides ACL. */
874 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
875 hcon = hci_connect(hdev, LE_LINK, dst,
876 chan->sec_level, auth_type);
878 hcon = hci_connect(hdev, ACL_LINK, dst,
879 chan->sec_level, auth_type);
886 conn = l2cap_conn_add(hcon, 0);
893 /* Update source addr of the socket */
894 bacpy(src, conn->src);
896 l2cap_chan_add(conn, chan);
898 sk->sk_state = BT_CONNECT;
899 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented sockets are done once
 * security passes; others start the L2CAP handshake now. */
901 if (hcon->state == BT_CONNECTED) {
902 if (sk->sk_type != SOCK_SEQPACKET &&
903 sk->sk_type != SOCK_STREAM) {
904 l2cap_sock_clear_timer(sk);
905 if (l2cap_check_security(chan))
906 sk->sk_state = BT_CONNECTED;
908 l2cap_do_start(chan);
914 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until the peer has acknowledged every
 * outstanding ERTM I-frame, a signal arrives, the timeout expires, or
 * the socket errors out.  Returns 0 on success or a negative errno. */
919 int __l2cap_wait_ack(struct sock *sk)
921 DECLARE_WAITQUEUE(wait, current);
925 add_wait_queue(sk_sleep(sk), &wait);
/* Loop while frames remain unacked and the connection is alive. */
926 while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
927 set_current_state(TASK_INTERRUPTIBLE);
932 if (signal_pending(current)) {
933 err = sock_intr_errno(timeo);
938 timeo = schedule_timeout(timeo);
941 err = sock_error(sk);
945 set_current_state(TASK_RUNNING);
946 remove_wait_queue(sk_sleep(sk), &wait);
950 static void l2cap_monitor_timeout(unsigned long arg)
952 struct l2cap_chan *chan = (void *) arg;
953 struct sock *sk = chan->sk;
955 BT_DBG("chan %p", chan);
958 if (chan->retry_count >= chan->remote_max_tx) {
959 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
965 __mod_monitor_timer();
967 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
971 static void l2cap_retrans_timeout(unsigned long arg)
973 struct l2cap_chan *chan = (void *) arg;
974 struct sock *sk = chan->sk;
976 BT_DBG("chan %p", chan);
979 chan->retry_count = 1;
980 __mod_monitor_timer();
982 chan->conn_state |= L2CAP_CONN_WAIT_F;
984 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
988 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
992 while ((skb = skb_peek(&chan->tx_q)) &&
993 chan->unacked_frames) {
994 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
997 skb = skb_dequeue(&chan->tx_q);
1000 chan->unacked_frames--;
1003 if (!chan->unacked_frames)
1004 del_timer(&chan->retrans_timer);
1007 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1009 struct sock *sk = chan->sk;
1010 struct hci_conn *hcon = l2cap_pi(sk)->conn->hcon;
1013 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1015 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1016 flags = ACL_START_NO_FLUSH;
1020 hci_send_acl(hcon, skb, flags);
1023 void l2cap_streaming_send(struct l2cap_chan *chan)
1025 struct sk_buff *skb;
1028 while ((skb = skb_dequeue(&chan->tx_q))) {
1029 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1030 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1031 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1033 if (chan->fcs == L2CAP_FCS_CRC16) {
1034 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1035 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1038 l2cap_do_send(chan, skb);
1040 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame with the given TxSeq (SREJ
 * path): locate it on tx_q, refresh its control field (ReqSeq and a
 * pending F-bit) and FCS, then clone and resend.  Disconnects the
 * channel once remote_max_tx retransmissions have been attempted. */
1044 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1046 struct sock *sk = chan->sk;
1047 struct l2cap_pinfo *pi = l2cap_pi(sk);
1048 struct sk_buff *skb, *tx_skb;
/* Linear scan for the frame with the requested sequence number. */
1051 skb = skb_peek(&chan->tx_q);
1056 if (bt_cb(skb)->tx_seq == tx_seq)
1059 if (skb_queue_is_last(&chan->tx_q, skb))
1062 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1064 if (chan->remote_max_tx &&
1065 bt_cb(skb)->retries == chan->remote_max_tx) {
1066 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
/* Clone shares the data buffer with the queued original. */
1070 tx_skb = skb_clone(skb, GFP_ATOMIC);
1071 bt_cb(skb)->retries++;
1072 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; everything else is rebuilt below. */
1073 control &= L2CAP_CTRL_SAR;
1075 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1076 control |= L2CAP_CTRL_FINAL;
1077 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1080 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1081 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1083 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1085 if (chan->fcs == L2CAP_FCS_CRC16) {
1086 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1087 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1090 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send I-frames from tx_send_head while the
 * transmit window has room, stamping ReqSeq/TxSeq (and a pending
 * F-bit) into each frame and arming the retransmission timer.
 * Returns the number of frames sent (via a counter kept in elided
 * lines) or an error. */
1093 int l2cap_ertm_send(struct l2cap_chan *chan)
1095 struct sk_buff *skb, *tx_skb;
1096 struct sock *sk = chan->sk;
1097 struct l2cap_pinfo *pi = l2cap_pi(sk);
1101 if (sk->sk_state != BT_CONNECTED)
1104 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* Too many retries on this frame: give up on the channel. */
1106 if (chan->remote_max_tx &&
1107 bt_cb(skb)->retries == chan->remote_max_tx) {
1108 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1112 tx_skb = skb_clone(skb, GFP_ATOMIC);
1114 bt_cb(skb)->retries++;
1116 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only SAR bits; sequence numbers are rebuilt each send. */
1117 control &= L2CAP_CTRL_SAR;
1119 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1120 control |= L2CAP_CTRL_FINAL;
1121 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1123 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1124 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1125 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* skb and tx_skb share one data buffer (skb_clone above), so
 * writing the FCS through skb->data also lands in the clone
 * that is about to be sent. */
1128 if (chan->fcs == L2CAP_FCS_CRC16) {
1129 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1130 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1133 l2cap_do_send(chan, tx_skb);
1135 __mod_retrans_timer();
1137 bt_cb(skb)->tx_seq = chan->next_tx_seq;
/* Modulo-64 sequence space. */
1138 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* First transmission of this frame counts toward the window. */
1140 if (bt_cb(skb)->retries == 1)
1141 chan->unacked_frames++;
1143 chan->frames_sent++;
1145 if (skb_queue_is_last(&chan->tx_q, skb))
1146 chan->tx_send_head = NULL;
1148 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1156 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1160 if (!skb_queue_empty(&chan->tx_q))
1161 chan->tx_send_head = chan->tx_q.next;
1163 chan->next_tx_seq = chan->expected_ack_seq;
1164 ret = l2cap_ertm_send(chan);
1168 static void l2cap_send_ack(struct l2cap_chan *chan)
1172 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1174 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1175 control |= L2CAP_SUPER_RCV_NOT_READY;
1176 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1177 l2cap_send_sframe(chan, control);
1181 if (l2cap_ertm_send(chan) > 0)
1184 control |= L2CAP_SUPER_RCV_READY;
1185 l2cap_send_sframe(chan, control);
1188 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1190 struct srej_list *tail;
1193 control = L2CAP_SUPER_SELECT_REJECT;
1194 control |= L2CAP_CTRL_FINAL;
1196 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1197 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1199 l2cap_send_sframe(chan, control);
/* Copy 'len' bytes of user iovec data into 'skb': the first 'count'
 * bytes land in the skb itself, the remainder is split into MTU-sized
 * continuation skbs chained on frag_list. */
1202 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1204 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1205 struct sk_buff **frag;
1208 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1214 /* Continuation fragments (no L2CAP header) */
1215 frag = &skb_shinfo(skb)->frag_list;
1217 count = min_t(unsigned int, conn->mtu, len);
1219 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1222 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1228 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 16-bit PSM +
 * payload copied from the user's iovec.  Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1234 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1236 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1237 struct sk_buff *skb;
/* +2 for the PSM field carried by connectionless frames. */
1238 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1239 struct l2cap_hdr *lh;
1241 BT_DBG("sk %p len %d", sk, (int)len);
1243 count = min_t(unsigned int, (conn->mtu - hlen), len);
1244 skb = bt_skb_send_alloc(sk, count + hlen,
1245 msg->msg_flags & MSG_DONTWAIT, &err);
1247 return ERR_PTR(err);
1249 /* Create L2CAP header */
1250 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1251 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1252 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1253 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1255 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1256 if (unlikely(err < 0)) {
1258 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload from
 * the user's iovec.  Returns the skb or an ERR_PTR on failure. */
1263 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1266 struct sk_buff *skb;
1267 int err, count, hlen = L2CAP_HDR_SIZE;
1268 struct l2cap_hdr *lh;
1270 BT_DBG("sk %p len %d", sk, (int)len);
1272 count = min_t(unsigned int, (conn->mtu - hlen), len);
1273 skb = bt_skb_send_alloc(sk, count + hlen,
1274 msg->msg_flags & MSG_DONTWAIT, &err);
1276 return ERR_PTR(err);
1278 /* Create L2CAP header */
1279 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1280 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1281 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1283 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1284 if (unlikely(err < 0)) {
1286 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + control field,
 * optional SAR SDU-length field ('sdulen'), payload, and room for a
 * CRC16 FCS (filled in with a placeholder, computed at send time).
 * Returns the skb or an ERR_PTR on failure. */
1291 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1293 struct sock *sk = chan->sk;
1294 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1295 struct sk_buff *skb;
/* Base header: L2CAP header + 16-bit control field. */
1296 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1297 struct l2cap_hdr *lh;
1299 BT_DBG("sk %p len %d", sk, (int)len);
1302 return ERR_PTR(-ENOTCONN);
1307 if (chan->fcs == L2CAP_FCS_CRC16)
1310 count = min_t(unsigned int, (conn->mtu - hlen), len);
1311 skb = bt_skb_send_alloc(sk, count + hlen,
1312 msg->msg_flags & MSG_DONTWAIT, &err);
1314 return ERR_PTR(err);
1316 /* Create L2CAP header */
1317 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1318 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1319 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1320 put_unaligned_le16(control, skb_put(skb, 2));
/* SAR start frames carry the total SDU length here. */
1322 put_unaligned_le16(sdulen, skb_put(skb, 2));
1324 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1325 if (unlikely(err < 0)) {
1327 return ERR_PTR(err);
/* Placeholder FCS; the real CRC is stamped just before transmit. */
1330 if (chan->fcs == L2CAP_FCS_CRC16)
1331 put_unaligned_le16(0, skb_put(skb, 2));
1333 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START / CONTINUE* /
 * END sequence of I-frames built on a private queue, then splice them
 * onto tx_q in one go so a partial SDU is never exposed to the
 * sender. */
1337 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1339 struct sk_buff *skb;
1340 struct sk_buff_head sar_queue;
1344 skb_queue_head_init(&sar_queue);
/* Start frame carries the total SDU length as 'sdulen'. */
1345 control = L2CAP_SDU_START;
1346 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1348 return PTR_ERR(skb);
1350 __skb_queue_tail(&sar_queue, skb);
1351 len -= chan->remote_mps;
1352 size += chan->remote_mps;
/* Middle frames are remote_mps-sized; the final one takes the rest. */
1357 if (len > chan->remote_mps) {
1358 control = L2CAP_SDU_CONTINUE;
1359 buflen = chan->remote_mps;
1361 control = L2CAP_SDU_END;
1365 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop the whole partially built SDU. */
1367 skb_queue_purge(&sar_queue);
1368 return PTR_ERR(skb);
1371 __skb_queue_tail(&sar_queue, skb);
1375 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1376 if (chan->tx_send_head == NULL)
1377 chan->tx_send_head = sar_queue.next;
/* Configuration finished on both sides: clear the config state, stop
 * the setup timer and wake whoever is waiting — the connecting socket
 * itself, or the listening parent for an incoming channel. */
1382 static void l2cap_chan_ready(struct sock *sk)
1384 struct sock *parent = bt_sk(sk)->parent;
1385 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1387 BT_DBG("sk %p, parent %p", sk, parent);
1389 chan->conf_state = 0;
1390 l2cap_sock_clear_timer(sk);
1393 /* Outgoing channel.
1394 * Wake up socket sleeping on connect.
1396 sk->sk_state = BT_CONNECTED;
1397 sk->sk_state_change(sk);
1399 /* Incoming channel.
1400 * Wake up socket sleeping on accept.
1402 parent->sk_data_ready(parent, 0);
1406 /* Copy frame to all raw sockets on that connection */
1407 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1409 struct sk_buff *nskb;
1410 struct l2cap_chan *chan;
1412 BT_DBG("conn %p", conn);
1414 read_lock(&conn->chan_lock);
1415 list_for_each_entry(chan, &conn->chan_l, list) {
1416 struct sock *sk = chan->sk;
/* Only raw sockets see copies of signalling traffic. */
1417 if (sk->sk_type != SOCK_RAW)
1420 /* Don't send frame to the socket it came from */
/* Each recipient gets its own clone of the frame. */
1423 nskb = skb_clone(skb, GFP_ATOMIC);
1427 if (sock_queue_rcv_skb(sk, nskb))
1430 read_unlock(&conn->chan_lock);
1433 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for one signalling command: L2CAP header +
 * command header + payload, spilling oversized payloads into
 * MTU-sized fragments on frag_list.  LE links use the LE signalling
 * CID.  Returns NULL on allocation failure. */
1434 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1435 u8 code, u8 ident, u16 dlen, void *data)
1437 struct sk_buff *skb, **frag;
1438 struct l2cap_cmd_hdr *cmd;
1439 struct l2cap_hdr *lh;
1442 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1443 conn, code, ident, dlen);
1445 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1446 count = min_t(unsigned int, conn->mtu, len);
1448 skb = bt_skb_alloc(count, GFP_ATOMIC);
1452 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1453 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling channel differs between LE and BR/EDR links. */
1455 if (conn->hcon->type == LE_LINK)
1456 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1458 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1460 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1463 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes into the head skb. */
1466 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1467 memcpy(skb_put(skb, count), data, count);
1473 /* Continuation fragments (no L2CAP header) */
1474 frag = &skb_shinfo(skb)->frag_list;
1476 count = min_t(unsigned int, conn->mtu, len);
1478 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1482 memcpy(skb_put(*frag, count), data, count);
1487 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into type/len/val and
 * advance *ptr past it.  1/2/4-byte options are returned by value;
 * anything longer is returned as a pointer into the buffer. */
1497 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1499 struct l2cap_conf_opt *opt = *ptr;
1502 len = L2CAP_CONF_OPT_SIZE + opt->len;
1510 *val = *((u8 *) opt->val);
1514 *val = get_unaligned_le16(opt->val);
1518 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy. */
1522 *val = (unsigned long) opt->val;
1526 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and
 * advance *ptr past it. Mirrors l2cap_get_conf_opt: 1/2/4-byte
 * values are stored by width, larger ones are memcpy'd from the
 * pointer carried in 'val'.
 */
1530 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1532 struct l2cap_conf_opt *opt = *ptr;
1534 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1541 *((u8 *) opt->val) = val;
/* Little-endian on the wire; destination may be unaligned. */
1545 put_unaligned_le16(val, opt->val);
1549 put_unaligned_le32(val, opt->val);
/* 'val' holds a pointer for variable-length options. */
1553 memcpy(opt->val, (void *) val, len);
1557 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement for the
 * channel. Runs in timer (softirq) context, hence bh_lock_sock.
 */
1560 static void l2cap_ack_timeout(unsigned long arg)
1562 struct l2cap_chan *chan = (void *) arg;
1564 bh_lock_sock(chan->sk);
1565 l2cap_send_ack(chan);
1566 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state: sequence counters, the
 * retransmission/monitor/ack timers, the SREJ and local-busy queues,
 * and the deferred local-busy worker. Also reroutes the socket
 * backlog to the ERTM receive path.
 */
1569 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1571 struct sock *sk = chan->sk;
/* All ERTM sequence state starts at zero. */
1573 chan->expected_ack_seq = 0;
1574 chan->unacked_frames = 0;
1575 chan->buffer_seq = 0;
1576 chan->num_acked = 0;
1577 chan->frames_sent = 0;
1579 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1580 (unsigned long) chan);
1581 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1582 (unsigned long) chan);
1583 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1585 skb_queue_head_init(&chan->srej_q);
1586 skb_queue_head_init(&chan->busy_q);
1588 INIT_LIST_HEAD(&chan->srej_l);
1590 INIT_WORK(&chan->busy_work, l2cap_busy_work);
/* Frames backlogged while the socket is owned go through ERTM rx. */
1592 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only if the
 * remote advertised support for it, otherwise fall back to basic
 * mode. (Some original lines elided from this view.)
 */
1595 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1598 case L2CAP_MODE_STREAMING:
1599 case L2CAP_MODE_ERTM:
1600 if (l2cap_mode_supported(mode, remote_feat_mask))
1604 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request for 'chan' into 'data':
 * optionally an MTU option, then a mode-specific RFC option
 * (basic / ERTM / streaming) and, for ERTM/streaming, an FCS
 * option when "no FCS" was negotiated. Returns the request length
 * (return expression elided from this view).
 */
1608 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1610 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1611 struct l2cap_conf_req *req = data;
1612 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1613 void *ptr = req->data;
1615 BT_DBG("chan %p", chan);
/* Only re-select the mode on the very first request/response. */
1617 if (chan->num_conf_req || chan->num_conf_rsp)
1620 switch (chan->mode) {
1621 case L2CAP_MODE_STREAMING:
1622 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE means the mode is mandated locally; don't downgrade. */
1623 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1628 chan->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Only send an MTU option when it differs from the spec default. */
1633 if (chan->imtu != L2CAP_DEFAULT_MTU)
1634 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1636 switch (chan->mode) {
1637 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor streaming, the RFC option
 * would be meaningless — skip it entirely. */
1638 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1639 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1642 rfc.mode = L2CAP_MODE_BASIC;
1644 rfc.max_transmit = 0;
1645 rfc.retrans_timeout = 0;
1646 rfc.monitor_timeout = 0;
1647 rfc.max_pdu_size = 0;
1649 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1650 (unsigned long) &rfc);
1653 case L2CAP_MODE_ERTM:
1654 rfc.mode = L2CAP_MODE_ERTM;
1655 rfc.txwin_size = chan->tx_win;
1656 rfc.max_transmit = chan->max_tx;
1657 rfc.retrans_timeout = 0;
1658 rfc.monitor_timeout = 0;
1659 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so PDU + ERTM overhead (10 bytes) fits the link MTU. */
1660 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1661 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1663 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1664 (unsigned long) &rfc);
1666 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request "no FCS" if we or the peer asked to drop it. */
1669 if (chan->fcs == L2CAP_FCS_NONE ||
1670 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1671 chan->fcs = L2CAP_FCS_NONE;
1672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1676 case L2CAP_MODE_STREAMING:
1677 rfc.mode = L2CAP_MODE_STREAMING;
1679 rfc.max_transmit = 0;
1680 rfc.retrans_timeout = 0;
1681 rfc.monitor_timeout = 0;
1682 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1683 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1684 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1686 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1687 (unsigned long) &rfc);
1689 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1692 if (chan->fcs == L2CAP_FCS_NONE ||
1693 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1694 chan->fcs = L2CAP_FCS_NONE;
1695 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1700 req->dcid = cpu_to_le16(pi->dcid);
1701 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into 'data'.
 * Walks the options, negotiates mode/MTU/RFC parameters, and returns
 * the response length (return expression elided from this view).
 * Returns -ECONNREFUSED when the modes cannot be reconciled.
 */
1706 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1708 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1709 struct l2cap_conf_rsp *rsp = data;
1710 void *ptr = rsp->data;
1711 void *req = chan->conf_req;
1712 int len = chan->conf_len;
1713 int type, hint, olen;
1715 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1716 u16 mtu = L2CAP_DEFAULT_MTU;
1717 u16 result = L2CAP_CONF_SUCCESS;
1719 BT_DBG("chan %p", chan);
/* First pass: collect the options the peer sent. */
1721 while (len >= L2CAP_CONF_OPT_SIZE) {
1722 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options are advisory and must not trigger CONF_UNKNOWN. */
1724 hint = type & L2CAP_CONF_HINT;
1725 type &= L2CAP_CONF_MASK;
1728 case L2CAP_CONF_MTU:
1732 case L2CAP_CONF_FLUSH_TO:
1733 chan->flush_to = val;
1736 case L2CAP_CONF_QOS:
1739 case L2CAP_CONF_RFC:
1740 if (olen == sizeof(rfc))
1741 memcpy(&rfc, (void *) val, olen);
1744 case L2CAP_CONF_FCS:
1745 if (val == L2CAP_FCS_NONE)
1746 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject it back to the peer. */
1754 result = L2CAP_CONF_UNKNOWN;
1755 *((u8 *) ptr++) = type;
1760 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1763 switch (chan->mode) {
1764 case L2CAP_MODE_STREAMING:
1765 case L2CAP_MODE_ERTM:
1766 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1767 chan->mode = l2cap_select_mode(rfc.mode,
1768 pi->conn->feat_mask);
/* Mode is locally mandated and the peer disagrees: refuse. */
1772 if (chan->mode != rfc.mode)
1773 return -ECONNREFUSED;
1779 if (chan->mode != rfc.mode) {
1780 result = L2CAP_CONF_UNACCEPT;
1781 rfc.mode = chan->mode;
/* Give the peer exactly one chance to re-propose a mode. */
1783 if (chan->num_conf_rsp == 1)
1784 return -ECONNREFUSED;
1786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1787 sizeof(rfc), (unsigned long) &rfc);
1791 if (result == L2CAP_CONF_SUCCESS) {
1792 /* Configure output options and let the other side know
1793 * which ones we don't like. */
1795 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1796 result = L2CAP_CONF_UNACCEPT;
1799 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1801 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1804 case L2CAP_MODE_BASIC:
/* Basic mode never uses an FCS. */
1805 chan->fcs = L2CAP_FCS_NONE;
1806 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1809 case L2CAP_MODE_ERTM:
1810 chan->remote_tx_win = rfc.txwin_size;
1811 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits our link MTU (10 bytes overhead). */
1813 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1814 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1816 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1818 rfc.retrans_timeout =
1819 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1820 rfc.monitor_timeout =
1821 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1823 chan->conf_state |= L2CAP_CONF_MODE_DONE;
/* Echo the (possibly adjusted) RFC option in the response. */
1825 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1826 sizeof(rfc), (unsigned long) &rfc);
1830 case L2CAP_MODE_STREAMING:
1831 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1832 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1834 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1836 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1838 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1839 sizeof(rfc), (unsigned long) &rfc);
1844 result = L2CAP_CONF_UNACCEPT;
1846 memset(&rfc, 0, sizeof(rfc));
1847 rfc.mode = chan->mode;
1850 if (result == L2CAP_CONF_SUCCESS)
1851 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1853 rsp->scid = cpu_to_le16(pi->dcid);
1854 rsp->result = cpu_to_le16(result);
1855 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response ('rsp', 'len') and build the
 * follow-up Configure Request into 'data', adopting the parameters
 * the peer adjusted. '*result' carries the response result in/out.
 * Returns the new request length (return expression elided) or
 * -ECONNREFUSED when the peer's mode is unacceptable.
 */
1860 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1862 struct sock *sk = chan->sk;
1863 struct l2cap_pinfo *pi = l2cap_pi(sk);
1864 struct l2cap_conf_req *req = data;
1865 void *ptr = req->data;
1868 struct l2cap_conf_rfc rfc;
1870 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1872 while (len >= L2CAP_CONF_OPT_SIZE) {
1873 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1876 case L2CAP_CONF_MTU:
/* Peer offered an MTU below the spec minimum: reject, counter
 * with the minimum. */
1877 if (val < L2CAP_DEFAULT_MIN_MTU) {
1878 *result = L2CAP_CONF_UNACCEPT;
1879 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
1882 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1885 case L2CAP_CONF_FLUSH_TO:
1886 chan->flush_to = val;
1887 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1891 case L2CAP_CONF_RFC:
1892 if (olen == sizeof(rfc))
1893 memcpy(&rfc, (void *)val, olen);
/* Locally-mandated mode may not be renegotiated by the peer. */
1895 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1896 rfc.mode != chan->mode)
1897 return -ECONNREFUSED;
1901 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1902 sizeof(rfc), (unsigned long) &rfc);
1907 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
1908 return -ECONNREFUSED;
1910 chan->mode = rfc.mode;
1912 if (*result == L2CAP_CONF_SUCCESS) {
/* Adopt the timers/MPS the peer confirmed for the final mode. */
1914 case L2CAP_MODE_ERTM:
1915 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1916 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1917 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1919 case L2CAP_MODE_STREAMING:
1920 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1924 req->dcid = cpu_to_le16(pi->dcid);
1925 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags,
 * no options) and return its length (return expression elided).
 */
1930 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1932 struct l2cap_conf_rsp *rsp = data;
1933 void *ptr = rsp->data;
1935 BT_DBG("sk %p", sk);
1937 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1938 rsp->result = cpu_to_le16(result);
1939 rsp->flags = cpu_to_le16(flags);
/* Complete a connection whose response was deferred (defer_setup):
 * send the success Connect Response now and, if we have not done so
 * yet, kick off configuration with our first Configure Request.
 */
1944 void __l2cap_connect_rsp_defer(struct sock *sk)
1946 struct l2cap_conn_rsp rsp;
1947 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1948 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1951 sk->sk_state = BT_CONFIG;
1953 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1954 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1955 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1956 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original Connect Request. */
1957 l2cap_send_cmd(conn, chan->ident,
1958 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1960 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
1963 chan->conf_state |= L2CAP_CONF_REQ_SENT;
1964 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1965 l2cap_build_conf_req(chan, buf), buf);
1966 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and
 * record the negotiated timers/MPS on the channel. No-op for basic
 * mode channels.
 */
1969 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
1973 struct l2cap_conf_rfc rfc;
1975 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
1977 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
1980 while (len >= L2CAP_CONF_OPT_SIZE) {
1981 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1984 case L2CAP_CONF_RFC:
1985 if (olen == sizeof(rfc))
1986 memcpy(&rfc, (void *)val, olen);
/* Store the values the peer confirmed. */
1993 case L2CAP_MODE_ERTM:
1994 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1995 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1996 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1998 case L2CAP_MODE_STREAMING:
1999 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * Information Request, treat the feature-mask exchange as done and
 * proceed with pending channel setup.
 */
2003 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2005 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is acted on here. */
2007 if (rej->reason != 0x0000)
2010 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2011 cmd->ident == conn->info_ident) {
2012 del_timer(&conn->info_timer);
2014 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2015 conn->info_ident = 0;
2017 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening socket for
 * the PSM, apply security and backlog checks, create the child
 * socket/channel, send the Connect Response, and if needed start the
 * information exchange or configuration. (Error-path labels are
 * elided from this view.)
 */
2023 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2025 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2026 struct l2cap_conn_rsp rsp;
2027 struct l2cap_chan *chan = NULL;
2028 struct sock *parent, *sk = NULL;
2029 int result, status = L2CAP_CS_NO_INFO;
2031 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2032 __le16 psm = req->psm;
2034 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2036 /* Check if we have socket listening on psm */
2037 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2039 result = L2CAP_CR_BAD_PSM;
2043 bh_lock_sock(parent);
2045 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP and is exempt from the link-mode check. */
2046 if (psm != cpu_to_le16(0x0001) &&
2047 !hci_conn_check_link_mode(conn->hcon)) {
2048 conn->disc_reason = 0x05;
2049 result = L2CAP_CR_SEC_BLOCK;
2053 result = L2CAP_CR_NO_MEM;
2055 /* Check for backlog size */
2056 if (sk_acceptq_is_full(parent)) {
2057 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2061 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2065 chan = l2cap_chan_alloc(sk);
2067 l2cap_sock_kill(sk);
2071 l2cap_pi(sk)->chan = chan;
2073 write_lock_bh(&conn->chan_lock);
2075 /* Check if we already have channel with that dcid */
2076 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2077 write_unlock_bh(&conn->chan_lock);
2078 sock_set_flag(sk, SOCK_ZAPPED);
2079 l2cap_sock_kill(sk);
2083 hci_conn_hold(conn->hcon);
/* Initialise the child from the listening parent and bind addresses. */
2085 l2cap_sock_init(sk, parent);
2086 bacpy(&bt_sk(sk)->src, conn->src);
2087 bacpy(&bt_sk(sk)->dst, conn->dst);
2088 l2cap_pi(sk)->psm = psm;
2089 l2cap_pi(sk)->dcid = scid;
2091 bt_accept_enqueue(parent, sk);
2093 __l2cap_chan_add(conn, chan);
2095 dcid = l2cap_pi(sk)->scid;
2097 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
2099 chan->ident = cmd->ident;
2101 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2102 if (l2cap_check_security(chan)) {
2103 if (bt_sk(sk)->defer_setup) {
2104 sk->sk_state = BT_CONNECT2;
2105 result = L2CAP_CR_PEND;
2106 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the accept()ing listener to authorize the connection. */
2107 parent->sk_data_ready(parent, 0);
2109 sk->sk_state = BT_CONFIG;
2110 result = L2CAP_CR_SUCCESS;
2111 status = L2CAP_CS_NO_INFO;
2114 sk->sk_state = BT_CONNECT2;
2115 result = L2CAP_CR_PEND;
2116 status = L2CAP_CS_AUTHEN_PEND;
2119 sk->sk_state = BT_CONNECT2;
2120 result = L2CAP_CR_PEND;
2121 status = L2CAP_CS_NO_INFO;
2124 write_unlock_bh(&conn->chan_lock);
2127 bh_unlock_sock(parent);
2130 rsp.scid = cpu_to_le16(scid);
2131 rsp.dcid = cpu_to_le16(dcid);
2132 rsp.result = cpu_to_le16(result);
2133 rsp.status = cpu_to_le16(status);
2134 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: query it before configuring. */
2136 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2137 struct l2cap_info_req info;
2138 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2140 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2141 conn->info_ident = l2cap_get_ident(conn);
2143 mod_timer(&conn->info_timer, jiffies +
2144 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2146 l2cap_send_cmd(conn, conn->info_ident,
2147 L2CAP_INFO_REQ, sizeof(info), &info);
2150 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
2151 result == L2CAP_CR_SUCCESS) {
2153 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2154 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2155 l2cap_build_conf_req(chan, buf), buf);
2156 chan->num_conf_req++;
/* Handle an incoming Connect Response: on success move to BT_CONFIG
 * and send our first Configure Request; on pending just mark the
 * channel; otherwise tear the channel down. (Some branch lines are
 * elided from this view.)
 */
2162 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2164 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2165 u16 scid, dcid, result, status;
2166 struct l2cap_chan *chan;
2170 scid = __le16_to_cpu(rsp->scid);
2171 dcid = __le16_to_cpu(rsp->dcid);
2172 result = __le16_to_cpu(rsp->result);
2173 status = __le16_to_cpu(rsp->status);
2175 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid is 0 in some reject responses; fall back to lookup by ident. */
2178 chan = l2cap_get_chan_by_scid(conn, scid);
2182 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2190 case L2CAP_CR_SUCCESS:
2191 sk->sk_state = BT_CONFIG;
2193 l2cap_pi(sk)->dcid = dcid;
2194 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2196 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
2199 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2201 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2202 l2cap_build_conf_req(chan, req), req);
2203 chan->num_conf_req++;
2207 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
2211 /* don't delete l2cap channel if sk is owned by user */
2212 if (sock_owned_by_user(sk)) {
2213 sk->sk_state = BT_DISCONN;
2214 l2cap_sock_clear_timer(sk);
/* Short timer: retry the teardown once the user releases the sock. */
2215 l2cap_sock_set_timer(sk, HZ / 5);
2219 l2cap_chan_del(chan, ECONNREFUSED);
/* Choose the channel's final FCS setting after configuration:
 * no FCS outside ERTM/streaming; otherwise CRC16 unless "no FCS"
 * was received during negotiation.
 */
2227 static inline void set_default_fcs(struct l2cap_chan *chan)
2229 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2231 /* FCS is enabled only in ERTM or streaming mode, if one or both
2234 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2235 chan->fcs = L2CAP_FCS_NONE;
2236 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2237 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: accumulate (possibly
 * multi-fragment) option data in chan->conf_req, then parse the
 * complete request, send our response, and — once both directions
 * are configured — bring the channel up. (Some error-path lines are
 * elided from this view.)
 */
2240 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2242 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2245 struct l2cap_chan *chan;
2249 dcid = __le16_to_cpu(req->dcid);
2250 flags = __le16_to_cpu(req->flags);
2252 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2254 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while in BT_CONFIG; reject otherwise. */
2260 if (sk->sk_state != BT_CONFIG) {
2261 struct l2cap_cmd_rej rej;
2263 rej.reason = cpu_to_le16(0x0002);
2264 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2269 /* Reject if config buffer is too small. */
2270 len = cmd_len - sizeof(*req);
2271 if (chan->conf_len + len > sizeof(chan->conf_req)) {
2272 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2273 l2cap_build_conf_rsp(sk, rsp,
2274 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
2279 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2280 chan->conf_len += len;
/* Continuation flag set: more fragments follow, ack and wait. */
2282 if (flags & 0x0001) {
2283 /* Incomplete config. Send empty response. */
2284 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2285 l2cap_build_conf_rsp(sk, rsp,
2286 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2290 /* Complete config. */
2291 len = l2cap_parse_conf_req(chan, rsp);
2293 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2297 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2298 chan->num_conf_rsp++;
2300 /* Reset config buffer. */
2303 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: initialise data path and go live. */
2306 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
2307 set_default_fcs(chan);
2309 sk->sk_state = BT_CONNECTED;
2311 chan->next_tx_seq = 0;
2312 chan->expected_tx_seq = 0;
2313 skb_queue_head_init(&chan->tx_q);
2314 if (chan->mode == L2CAP_MODE_ERTM)
2315 l2cap_ertm_init(chan);
2317 l2cap_chan_ready(sk);
2321 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
2323 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2324 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2325 l2cap_build_conf_req(chan, buf), buf);
2326 chan->num_conf_req++;
/* Handle an incoming Configure Response: on success record the
 * negotiated RFC parameters; on "unacceptable parameters" re-issue a
 * corrected Configure Request (bounded retries); otherwise tear the
 * channel down. When both directions are done, bring the channel up.
 * (Some branch/label lines are elided from this view.)
 */
2334 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2336 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2337 u16 scid, flags, result;
2338 struct l2cap_chan *chan;
/* Fix: cmd->len is a wire-order __le16; convert before arithmetic
 * (matches l2cap_sig_channel / l2cap_conn_param_update_req). Without
 * the conversion 'len' is wrong on big-endian hosts. */
2340 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2342 scid = __le16_to_cpu(rsp->scid);
2343 flags = __le16_to_cpu(rsp->flags);
2344 result = __le16_to_cpu(rsp->result);
2346 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2347 scid, flags, result);
2349 chan = l2cap_get_chan_by_scid(conn, scid);
2356 case L2CAP_CONF_SUCCESS:
2357 l2cap_conf_rfc_get(chan, rsp->data, len);
2360 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted parameters, but only a bounded number of times. */
2361 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2364 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2365 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2369 /* throw out any old stored conf requests */
2370 result = L2CAP_CONF_SUCCESS;
2371 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2374 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2378 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2379 L2CAP_CONF_REQ, len, req);
2380 chan->num_conf_req++;
2381 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: surface an error and disconnect. */
2387 sk->sk_err = ECONNRESET;
2388 l2cap_sock_set_timer(sk, HZ * 5);
2389 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2396 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: initialise data path and go live. */
2398 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2399 set_default_fcs(chan);
2401 sk->sk_state = BT_CONNECTED;
2402 chan->next_tx_seq = 0;
2403 chan->expected_tx_seq = 0;
2404 skb_queue_head_init(&chan->tx_q);
2405 if (chan->mode == L2CAP_MODE_ERTM)
2406 l2cap_ertm_init(chan);
2408 l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: acknowledge with a
 * Disconnect Response, shut the socket down, and delete the channel
 * (deferred via a short timer if userspace currently owns the sock).
 */
2416 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2418 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2419 struct l2cap_disconn_rsp rsp;
2421 struct l2cap_chan *chan;
2424 scid = __le16_to_cpu(req->scid);
2425 dcid = __le16_to_cpu(req->dcid);
2427 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local (source) channel id. */
2429 chan = l2cap_get_chan_by_scid(conn, dcid);
2435 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2436 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2437 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2439 sk->sk_shutdown = SHUTDOWN_MASK;
2441 /* don't delete l2cap channel if sk is owned by user */
2442 if (sock_owned_by_user(sk)) {
2443 sk->sk_state = BT_DISCONN;
2444 l2cap_sock_clear_timer(sk);
/* Retry the teardown shortly, once the user releases the sock. */
2445 l2cap_sock_set_timer(sk, HZ / 5);
2450 l2cap_chan_del(chan, ECONNRESET);
2453 l2cap_sock_kill(sk);
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel (deferred via a short timer if
 * userspace currently owns the sock).
 */
2457 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2459 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2461 struct l2cap_chan *chan;
2464 scid = __le16_to_cpu(rsp->scid);
2465 dcid = __le16_to_cpu(rsp->dcid);
2467 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2469 chan = l2cap_get_chan_by_scid(conn, scid);
2475 /* don't delete l2cap channel if sk is owned by user */
2476 if (sock_owned_by_user(sk)) {
2477 sk->sk_state = BT_DISCONN;
2478 l2cap_sock_clear_timer(sk);
2479 l2cap_sock_set_timer(sk, HZ / 5);
/* err == 0: this is a clean, locally-initiated disconnect. */
2484 l2cap_chan_del(chan, 0);
2487 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries, and report NOTSUPP for anything else.
 */
2491 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2493 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2496 type = __le16_to_cpu(req->type);
2498 BT_DBG("type 0x%4.4x", type);
2500 if (type == L2CAP_IT_FEAT_MASK) {
2502 u32 feat_mask = l2cap_feat_mask;
2503 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2504 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2505 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming on top of the static feature mask. */
2507 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2509 put_unaligned_le32(feat_mask, rsp->data);
2510 l2cap_send_cmd(conn, cmd->ident,
2511 L2CAP_INFO_RSP, sizeof(buf), buf);
2512 } else if (type == L2CAP_IT_FIXED_CHAN) {
2514 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2515 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2516 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap starts after the 4-byte response header. */
2517 memcpy(buf + 4, l2cap_fixed_chan, 8);
2518 l2cap_send_cmd(conn, cmd->ident,
2519 L2CAP_INFO_RSP, sizeof(buf), buf);
2521 struct l2cap_info_rsp rsp;
2522 rsp.type = cpu_to_le16(type);
2523 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2524 l2cap_send_cmd(conn, cmd->ident,
2525 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response to our query: store the
 * feature mask, chain a fixed-channel query if supported, and once
 * the exchange is complete kick off pending channel setup.
 */
2531 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2533 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2536 type = __le16_to_cpu(rsp->type);
2537 result = __le16_to_cpu(rsp->result);
2539 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2541 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2542 if (cmd->ident != conn->info_ident ||
2543 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2546 del_timer(&conn->info_timer);
/* Peer couldn't answer: treat the exchange as finished anyway. */
2548 if (result != L2CAP_IR_SUCCESS) {
2549 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2550 conn->info_ident = 0;
2552 l2cap_conn_start(conn);
2557 if (type == L2CAP_IT_FEAT_MASK) {
2558 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Fixed channels supported: issue the follow-up query. */
2560 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2561 struct l2cap_info_req req;
2562 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2564 conn->info_ident = l2cap_get_ident(conn);
2566 l2cap_send_cmd(conn, conn->info_ident,
2567 L2CAP_INFO_REQ, sizeof(req), &req);
2569 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2570 conn->info_ident = 0;
2572 l2cap_conn_start(conn);
2574 } else if (type == L2CAP_IT_FIXED_CHAN) {
2575 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2576 conn->info_ident = 0;
2578 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the ranges
 * allowed by the spec (interval, supervision timeout, slave latency).
 * Returns non-zero lines (elided) on rejection.
 */
2584 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
/* Connection interval: 6..3200 units, min must not exceed max. */
2589 if (min > max || min < 6 || max > 3200)
2592 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
2595 if (max >= to_multiplier * 8)
2598 max_latency = (to_multiplier * 8 / max) - 1;
2599 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only valid when
 * we are master; validate the parameters, send accept/reject, and on
 * accept apply them to the HCI connection.
 */
2605 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2606 struct l2cap_cmd_hdr *cmd, u8 *data)
2608 struct hci_conn *hcon = conn->hcon;
2609 struct l2cap_conn_param_update_req *req;
2610 struct l2cap_conn_param_update_rsp rsp;
2611 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request. */
2614 if (!(hcon->link_mode & HCI_LM_MASTER))
2617 cmd_len = __le16_to_cpu(cmd->len);
2618 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2621 req = (struct l2cap_conn_param_update_req *) data;
2622 min = __le16_to_cpu(req->min);
2623 max = __le16_to_cpu(req->max);
2624 latency = __le16_to_cpu(req->latency);
2625 to_multiplier = __le16_to_cpu(req->to_multiplier);
2627 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2628 min, max, latency, to_multiplier);
2630 memset(&rsp, 0, sizeof(rsp));
2632 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2634 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2636 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2638 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters passed validation: push them down to the controller. */
2642 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler; echo
 * requests are answered inline, unknown opcodes are logged.
 */
2647 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2648 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2652 switch (cmd->code) {
2653 case L2CAP_COMMAND_REJ:
2654 l2cap_command_rej(conn, cmd, data);
2657 case L2CAP_CONN_REQ:
2658 err = l2cap_connect_req(conn, cmd, data);
2661 case L2CAP_CONN_RSP:
2662 err = l2cap_connect_rsp(conn, cmd, data);
2665 case L2CAP_CONF_REQ:
2666 err = l2cap_config_req(conn, cmd, cmd_len, data);
2669 case L2CAP_CONF_RSP:
2670 err = l2cap_config_rsp(conn, cmd, data);
2673 case L2CAP_DISCONN_REQ:
2674 err = l2cap_disconnect_req(conn, cmd, data);
2677 case L2CAP_DISCONN_RSP:
2678 err = l2cap_disconnect_rsp(conn, cmd, data);
2681 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload back with the same ident. */
2682 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2685 case L2CAP_ECHO_RSP:
2688 case L2CAP_INFO_REQ:
2689 err = l2cap_information_req(conn, cmd, data);
2692 case L2CAP_INFO_RSP:
2693 err = l2cap_information_rsp(conn, cmd, data);
2697 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command; only the connection-parameter
 * update request is handled, the rest are logged/ignored.
 */
2705 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2706 struct l2cap_cmd_hdr *cmd, u8 *data)
2708 switch (cmd->code) {
2709 case L2CAP_COMMAND_REJ:
2712 case L2CAP_CONN_PARAM_UPDATE_REQ:
2713 return l2cap_conn_param_update_req(conn, cmd, data);
2715 case L2CAP_CONN_PARAM_UPDATE_RSP:
2719 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a frame received on the signalling channel: copy it to raw
 * sockets, then iterate over the contained commands, dispatching each
 * to the BR/EDR or LE handler and sending a Command Reject on error.
 */
2724 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2725 struct sk_buff *skb)
2727 u8 *data = skb->data;
2729 struct l2cap_cmd_hdr cmd;
2732 l2cap_raw_recv(conn, skb);
/* A single frame may carry several signalling commands. */
2734 while (len >= L2CAP_CMD_HDR_SIZE) {
2736 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2737 data += L2CAP_CMD_HDR_SIZE;
2738 len -= L2CAP_CMD_HDR_SIZE;
2740 cmd_len = le16_to_cpu(cmd.len);
2742 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated commands and the reserved ident 0. */
2744 if (cmd_len > len || !cmd.ident) {
2745 BT_DBG("corrupted command");
2749 if (conn->hcon->type == LE_LINK)
2750 err = l2cap_le_sig_cmd(conn, &cmd, data);
2752 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2755 struct l2cap_cmd_rej rej;
2757 BT_ERR("Wrong link type (%d)", err);
2759 /* FIXME: Map err to a valid reason */
2760 rej.reason = cpu_to_le16(0);
2761 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 * Trims the 2 FCS bytes off the skb, then compares the received FCS
 * (still readable just past the new tail) against a CRC computed
 * over the L2CAP header + control (hdr_size) + payload.
 */
2771 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
2773 u16 our_fcs, rcv_fcs;
2774 int hdr_size = L2CAP_HDR_SIZE + 2;
2776 if (chan->fcs == L2CAP_FCS_CRC16) {
2777 skb_trim(skb, skb->len - 2);
/* After the trim, skb->data + skb->len points at the FCS bytes. */
2778 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2779 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2781 if (our_fcs != rcv_fcs)
/* After a poll/final exchange, resume transmission: send RNR if we
 * are locally busy, retransmit or send pending I-frames otherwise,
 * and fall back to an RR when nothing was sent at all.
 */
2787 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2791 chan->frames_sent = 0;
2793 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2795 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2796 control |= L2CAP_SUPER_RCV_NOT_READY;
2797 l2cap_send_sframe(chan, control);
2798 chan->conn_state |= L2CAP_CONN_RNR_SENT;
2801 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
2802 l2cap_retransmit_frames(chan);
2804 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: acknowledge with an RR. */
2806 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2807 chan->frames_sent == 0) {
2808 control |= L2CAP_SUPER_RCV_READY;
2809 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq relative to buffer_seq (modulo-64 sequence
 * space). Duplicate tx_seq values are detected (handling elided).
 */
2813 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
2815 struct sk_buff *next_skb;
2816 int tx_seq_offset, next_tx_seq_offset;
2818 bt_cb(skb)->tx_seq = tx_seq;
2819 bt_cb(skb)->sar = sar;
/* Empty queue: nothing to order against. */
2821 next_skb = skb_peek(&chan->srej_q);
2823 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are distances from buffer_seq in the mod-64 space. */
2827 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
2828 if (tx_seq_offset < 0)
2829 tx_seq_offset += 64;
2832 if (bt_cb(next_skb)->tx_seq == tx_seq)
2835 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2836 chan->buffer_seq) % 64;
2837 if (next_tx_seq_offset < 0)
2838 next_tx_seq_offset += 64;
/* Found the first queued frame that follows us: insert before it. */
2840 if (next_tx_seq_offset > tx_seq_offset) {
2841 __skb_queue_before(&chan->srej_q, next_skb, skb);
2845 if (skb_queue_is_last(&chan->srej_q, next_skb))
2848 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
2850 __skb_queue_tail(&chan->srej_q, skb);
/* Reassemble an ERTM SDU from its SAR fragments. Unsegmented frames
 * are queued directly; START allocates chan->sdu, CONTINUE appends,
 * and the END case finishes and queues the SDU (retrying via
 * SAR_RETRY on a full receive queue). On protocol violations the
 * channel is disconnected. (Several lines elided from this view.)
 */
2855 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
2857 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2858 struct sk_buff *_skb;
2861 switch (control & L2CAP_CTRL_SAR) {
2862 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame inside an ongoing SAR sequence is an error. */
2863 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
2866 err = sock_queue_rcv_skb(chan->sk, skb);
2872 case L2CAP_SDU_START:
2873 if (chan->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the SDU length. */
2876 chan->sdu_len = get_unaligned_le16(skb->data);
2878 if (chan->sdu_len > chan->imtu)
2881 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
2885 /* pull sdu_len bytes only after alloc, because of Local Busy
2886 * condition we have to be sure that this will be executed
2887 * only once, i.e., when alloc does not fail */
2890 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2892 chan->conn_state |= L2CAP_CONN_SAR_SDU;
2893 chan->partial_sdu_len = skb->len;
2896 case L2CAP_SDU_CONTINUE:
2897 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* Guard against the fragments exceeding the announced SDU length. */
2903 chan->partial_sdu_len += skb->len;
2904 if (chan->partial_sdu_len > chan->sdu_len)
2907 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2912 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY means a previous END attempt already appended this
 * fragment; don't append it twice. */
2918 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
2919 chan->partial_sdu_len += skb->len;
2921 if (chan->partial_sdu_len > chan->imtu)
2924 if (chan->partial_sdu_len != chan->sdu_len)
2927 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2930 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2932 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2936 err = sock_queue_rcv_skb(chan->sk, _skb);
2939 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2943 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2944 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
2946 kfree_skb(chan->sdu);
2954 kfree_skb(chan->sdu);
/* Unrecoverable reassembly failure: drop the connection. */
2958 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* Try to drain the local-busy queue into the ERTM reassembly path.
 * Stops (and re-queues the head) when delivery fails; once the queue
 * is empty, exits the local-busy condition and, unless an RNR was
 * sent, announces readiness with a polled RR.
 */
2963 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
2965 struct sk_buff *skb;
2969 while ((skb = skb_dequeue(&chan->busy_q))) {
2970 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2971 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
/* Still busy: put the frame back at the head and bail out. */
2973 skb_queue_head(&chan->busy_q, skb);
2977 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
2980 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
/* We sent RNR earlier: poll the peer to resume transmission. */
2983 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2984 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2985 l2cap_send_sframe(chan, control);
2986 chan->retry_count = 1;
2988 del_timer(&chan->retrans_timer);
2989 __mod_monitor_timer();
2991 chan->conn_state |= L2CAP_CONN_WAIT_F;
2994 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2995 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
2997 BT_DBG("chan %p, Exit local busy", chan);
/* Workqueue handler for the local-busy condition: repeatedly try to
 * push queued frames upward, sleeping in between, until the queue
 * drains, a signal/socket error occurs, or the retry limit triggers
 * a disconnect.
 */
3002 static void l2cap_busy_work(struct work_struct *work)
3004 DECLARE_WAITQUEUE(wait, current);
3005 struct l2cap_chan *chan =
3006 container_of(work, struct l2cap_chan, busy_work);
3007 struct sock *sk = chan->sk;
3008 int n_tries = 0, timeo = HZ/5, err;
3009 struct sk_buff *skb;
3013 add_wait_queue(sk_sleep(sk), &wait);
3014 while ((skb = skb_peek(&chan->busy_q))) {
3015 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after too many attempts and drop the connection. */
3017 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3019 l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
3026 if (signal_pending(current)) {
3027 err = sock_intr_errno(timeo);
3032 timeo = schedule_timeout(timeo);
3035 err = sock_error(sk);
/* Queue fully drained: local busy is over. */
3039 if (l2cap_try_push_rx_skb(chan) == 0)
3043 set_current_state(TASK_RUNNING);
3044 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver (or queue) one received ERTM I-frame. If already in local-busy,
 * append to busy_q and retry the queue; otherwise attempt reassembly and,
 * on a busy condition, enter local-busy: queue the frame, send RNR, and
 * kick the busy workqueue. NOTE(review): partial listing. */
3049 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3053 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Stash the SAR bits in the skb cb so reassembly can be replayed later. */
3054 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3055 __skb_queue_tail(&chan->busy_q, skb);
3056 return l2cap_try_push_rx_skb(chan);
3061 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3063 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3067 /* Busy Condition */
3068 BT_DBG("chan %p, Enter local busy", chan);
3070 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3071 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3072 __skb_queue_tail(&chan->busy_q, skb);
/* Tell the peer to stop sending: Receiver-Not-Ready supervisory frame. */
3074 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3075 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3076 l2cap_send_sframe(chan, sctrl);
3078 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3080 del_timer(&chan->ack_timer);
/* Defer recovery to process context via the module-wide busy workqueue. */
3082 queue_work(_busy_wq, &chan->busy_work);
/* Reassemble an SDU from streaming-mode frames, dispatching on the SAR
 * (segmentation and reassembly) bits of the control field. Streaming mode
 * has no retransmission, so a new SDU start simply discards any partially
 * assembled one. NOTE(review): partial listing — error returns, `break`s
 * and the END-of-SDU length checks between the visible lines are missing. */
3087 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3089 struct sk_buff *_skb;
3093 * TODO: We have to notify the userland if some data is lost with the
3097 switch (control & L2CAP_CTRL_SAR) {
3098 case L2CAP_SDU_UNSEGMENTED:
/* A complete SDU aborts any in-progress reassembly. */
3099 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3100 kfree_skb(chan->sdu);
3104 err = sock_queue_rcv_skb(chan->sk, skb);
3110 case L2CAP_SDU_START:
3111 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3112 kfree_skb(chan->sdu);
/* Start frame carries the total SDU length in its first two bytes. */
3116 chan->sdu_len = get_unaligned_le16(skb->data);
3119 if (chan->sdu_len > chan->imtu) {
3124 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3130 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3132 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3133 chan->partial_sdu_len = skb->len;
3137 case L2CAP_SDU_CONTINUE:
/* Continuation without a preceding start is a protocol violation. */
3138 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3141 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3143 chan->partial_sdu_len += skb->len;
3144 if (chan->partial_sdu_len > chan->sdu_len)
3145 kfree_skb(chan->sdu);
/* (END-of-SDU case; label line not present in this listing.) */
3152 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3155 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3157 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3158 chan->partial_sdu_len += skb->len;
3160 if (chan->partial_sdu_len > chan->imtu)
/* Deliver only if the accumulated length matches the advertised one. */
3163 if (chan->partial_sdu_len == chan->sdu_len) {
3164 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3165 err = sock_queue_rcv_skb(chan->sk, _skb);
3172 kfree_skb(chan->sdu);
/* After a missing frame arrives, replay consecutively-sequenced frames
 * buffered on srej_q, starting at tx_seq, through reassembly.
 * NOTE(review): partial listing. */
3180 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3182 struct sk_buff *skb;
3185 while ((skb = skb_peek(&chan->srej_q))) {
/* Stop at the first gap in the sequence. */
3186 if (bt_cb(skb)->tx_seq != tx_seq)
3189 skb = skb_dequeue(&chan->srej_q);
3190 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3191 l2cap_ertm_reassembly_sdu(chan, skb, control);
3192 chan->buffer_seq_srej =
3193 (chan->buffer_seq_srej + 1) % 64;
3194 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ supervisory frames for every outstanding entry on srej_l,
 * re-appending each entry so the list keeps its order; the entry matching
 * tx_seq is presumably dropped (free path not visible in this listing). */
3198 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3200 struct srej_list *l, *tmp;
3203 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3204 if (l->tx_seq == tx_seq) {
3209 control = L2CAP_SUPER_SELECT_REJECT;
3210 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3211 l2cap_send_sframe(chan, control);
3212 /* rotate: re-queue at the tail after resending */
3213 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * the received tx_seq, recording each on srej_l so it can be matched when
 * the retransmission arrives. */
3217 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3219 struct srej_list *new;
3222 while (tx_seq != chan->expected_tx_seq) {
3223 control = L2CAP_SUPER_SELECT_REJECT;
3224 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3225 l2cap_send_sframe(chan, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced unchecked on
 * the next line — NULL deref on allocation failure. Can't fix safely from
 * this partial listing; needs an error path in the full source. */
3227 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3228 new->tx_seq = chan->expected_tx_seq;
3229 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3230 list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame we just received. */
3232 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames: validates tx_seq against the
 * receive window, handles the SREJ (selective-reject) recovery state
 * machine, and finally pushes in-sequence frames up for reassembly.
 * NOTE(review): partial listing — gotos, labels and several returns
 * between the visible lines are missing. */
3235 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3237 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3238 u8 tx_seq = __get_txseq(rx_control);
3239 u8 req_seq = __get_reqseq(rx_control);
3240 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3241 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames. */
3242 int num_to_ack = (chan->tx_win/6) + 1;
3245 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3246 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer. */
3248 if (L2CAP_CTRL_FINAL & rx_control &&
3249 chan->conn_state & L2CAP_CONN_WAIT_F) {
3250 del_timer(&chan->monitor_timer);
3251 if (chan->unacked_frames > 0)
3252 __mod_retrans_timer();
3253 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
/* req_seq piggybacks an acknowledgement of our transmitted frames. */
3256 chan->expected_ack_seq = req_seq;
3257 l2cap_drop_acked_frames(chan);
3259 if (tx_seq == chan->expected_tx_seq)
/* Modulo-64 distance from buffer_seq; must fall inside the tx window. */
3262 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3263 if (tx_seq_offset < 0)
3264 tx_seq_offset += 64;
3266 /* invalid tx_seq */
3267 if (tx_seq_offset >= chan->tx_win) {
3268 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* NOTE(review): `==` on conn_state looks wrong — conn_state is a bitmask
 * and every other test in this file uses `&`. Verify against upstream. */
3272 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
3275 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3276 struct srej_list *first;
3278 first = list_first_entry(&chan->srej_l,
3279 struct srej_list, list);
/* The frame we were waiting for arrived: buffer it and close the gap. */
3280 if (tx_seq == first->tx_seq) {
3281 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3282 l2cap_check_srej_gap(chan, tx_seq);
3284 list_del(&first->list);
/* All SREJs satisfied: leave recovery and resync buffer_seq. */
3287 if (list_empty(&chan->srej_l)) {
3288 chan->buffer_seq = chan->buffer_seq_srej;
3289 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3290 l2cap_send_ack(chan);
3291 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3294 struct srej_list *l;
3296 /* duplicated tx_seq */
3297 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Frame out of order while already in recovery: resend or extend SREJs. */
3300 list_for_each_entry(l, &chan->srej_l, list) {
3301 if (l->tx_seq == tx_seq) {
3302 l2cap_resend_srejframe(chan, tx_seq);
3306 l2cap_send_srejframe(chan, tx_seq);
3309 expected_tx_seq_offset =
3310 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3311 if (expected_tx_seq_offset < 0)
3312 expected_tx_seq_offset += 64;
3314 /* duplicated tx_seq */
3315 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3318 chan->conn_state |= L2CAP_CONN_SREJ_SENT;
3320 BT_DBG("chan %p, Enter SREJ", chan);
3322 INIT_LIST_HEAD(&chan->srej_l);
3323 chan->buffer_seq_srej = chan->buffer_seq;
3325 __skb_queue_head_init(&chan->srej_q);
3326 __skb_queue_head_init(&chan->busy_q);
3327 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3329 chan->conn_state |= L2CAP_CONN_SEND_PBIT;
3331 l2cap_send_srejframe(chan, tx_seq);
3333 del_timer(&chan->ack_timer);
/* (expected-frame path; label not present in this listing.) */
3338 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3340 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3341 bt_cb(skb)->tx_seq = tx_seq;
3342 bt_cb(skb)->sar = sar;
3343 __skb_queue_tail(&chan->srej_q, skb);
3347 err = l2cap_push_rx_skb(chan, skb, rx_control);
3351 if (rx_control & L2CAP_CTRL_FINAL) {
3352 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3353 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3355 l2cap_retransmit_frames(chan);
/* Send a standalone ack once num_to_ack frames have accumulated. */
3360 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3361 if (chan->num_acked == num_to_ack - 1)
3362 l2cap_send_ack(chan);
/* Handle a Receiver-Ready supervisory frame: ack outstanding frames, then
 * branch on the P/F bits (poll → answer with F-bit; final → possibly
 * retransmit; neither → resume normal sending).
 * NOTE(review): partial listing. */
3371 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3373 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3376 chan->expected_ack_seq = __get_reqseq(rx_control);
3377 l2cap_drop_acked_frames(chan);
3379 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: we owe it an F-bit in our next frame. */
3380 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3381 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3382 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3383 (chan->unacked_frames > 0))
3384 __mod_retrans_timer();
3386 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3387 l2cap_send_srejtail(chan);
3389 l2cap_send_i_or_rr_or_rnr(chan);
3392 } else if (rx_control & L2CAP_CTRL_FINAL) {
3393 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Ignore the F-bit if it answers a REJ we already acted on. */
3395 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3396 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3398 l2cap_retransmit_frames(chan);
3401 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3402 (chan->unacked_frames > 0))
3403 __mod_retrans_timer();
3405 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3406 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3407 l2cap_send_ack(chan);
3409 l2cap_ertm_send(chan);
/* Handle a Reject supervisory frame: ack up to req_seq and retransmit
 * from there; track REJ_ACT so a later F-bit isn't double-handled. */
3413 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3415 u8 tx_seq = __get_reqseq(rx_control);
3417 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3419 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3421 chan->expected_ack_seq = tx_seq;
3422 l2cap_drop_acked_frames(chan);
3424 if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit set: retransmit only if this REJ wasn't already acted on. */
3425 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3426 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3428 l2cap_retransmit_frames(chan);
3430 l2cap_retransmit_frames(chan);
/* While awaiting an F-bit, remember we already serviced this REJ. */
3432 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3433 chan->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective-Reject supervisory frame: retransmit the single
 * frame req_seq names, with P/F-bit bookkeeping mirroring rejframe's
 * REJ_ACT logic via SREJ_ACT/srej_save_reqseq.
 * NOTE(review): partial listing. */
3436 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3438 u8 tx_seq = __get_reqseq(rx_control);
3440 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3442 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3444 if (rx_control & L2CAP_CTRL_POLL) {
3445 chan->expected_ack_seq = tx_seq;
3446 l2cap_drop_acked_frames(chan);
3448 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3449 l2cap_retransmit_one_frame(chan, tx_seq);
3451 l2cap_ertm_send(chan);
/* Remember which seq this SREJ covered so a later F-bit can match it. */
3453 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3454 chan->srej_save_reqseq = tx_seq;
3455 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3457 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Skip the retransmit if this SREJ was already acted on. */
3458 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3459 chan->srej_save_reqseq == tx_seq)
3460 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3462 l2cap_retransmit_one_frame(chan, tx_seq);
3464 l2cap_retransmit_one_frame(chan, tx_seq);
3465 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3466 chan->srej_save_reqseq = tx_seq;
3467 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver-Not-Ready supervisory frame: mark the peer busy, ack
 * what it has confirmed, and answer a poll appropriately. */
3472 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3474 u8 tx_seq = __get_reqseq(rx_control);
3476 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3478 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3479 chan->expected_ack_seq = tx_seq;
3480 l2cap_drop_acked_frames(chan);
3482 if (rx_control & L2CAP_CTRL_POLL)
3483 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Not in SREJ recovery: stop retransmitting and reply RR/RNR+F if polled. */
3485 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3486 del_timer(&chan->retrans_timer);
3487 if (rx_control & L2CAP_CTRL_POLL)
3488 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: re-issue the outstanding SREJ tail (on poll) or RR. */
3492 if (rx_control & L2CAP_CTRL_POLL)
3493 l2cap_send_srejtail(chan);
3495 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
/* Dispatch a received supervisory frame (RR / REJ / SREJ / RNR) to its
 * handler, after servicing an F-bit that answers our poll. */
3498 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3500 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3502 if (L2CAP_CTRL_FINAL & rx_control &&
3503 chan->conn_state & L2CAP_CONN_WAIT_F) {
3504 del_timer(&chan->monitor_timer);
3505 if (chan->unacked_frames > 0)
3506 __mod_retrans_timer();
3507 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3510 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3511 case L2CAP_SUPER_RCV_READY:
3512 l2cap_data_channel_rrframe(chan, rx_control);
3515 case L2CAP_SUPER_REJECT:
3516 l2cap_data_channel_rejframe(chan, rx_control);
3519 case L2CAP_SUPER_SELECT_REJECT:
3520 l2cap_data_channel_srejframe(chan, rx_control);
3523 case L2CAP_SUPER_RCV_NOT_READY:
3524 l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate and dispatch one received ERTM frame: FCS check, payload
 * length check against MPS, req_seq window validation, then route to the
 * I-frame or S-frame handler. NOTE(review): partial listing — the len
 * computation and several drop/return lines are missing. */
3532 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3534 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3535 struct l2cap_pinfo *pi = l2cap_pi(sk);
3538 int len, next_tx_seq_offset, req_seq_offset;
3540 control = get_unaligned_le16(skb->data);
3545 * We can just drop the corrupted I-frame here.
3546 * Receiver will miss it and start proper recovery
3547 * procedures and ask retransmission.
3549 if (l2cap_check_fcs(chan, skb))
/* SAR-start I-frames carry a 2-byte SDU length; presumably adjusts len. */
3552 if (__is_sar_start(control) && __is_iframe(control))
3555 if (chan->fcs == L2CAP_FCS_CRC16)
3558 if (len > chan->mps) {
3559 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
/* req_seq must ack a frame we actually sent (modulo-64 window check). */
3563 req_seq = __get_reqseq(control);
3564 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3565 if (req_seq_offset < 0)
3566 req_seq_offset += 64;
3568 next_tx_seq_offset =
3569 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3570 if (next_tx_seq_offset < 0)
3571 next_tx_seq_offset += 64;
3573 /* check for invalid req-seq */
3574 if (req_seq_offset > next_tx_seq_offset) {
3575 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3579 if (__is_iframe(control)) {
3581 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3585 l2cap_data_channel_iframe(chan, control, skb);
3589 l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
3593 l2cap_data_channel_sframe(chan, control, skb);
/* Entry point for data on a connection-oriented channel: look up the
 * channel by source CID and dispatch per channel mode (basic / ERTM /
 * streaming). NOTE(review): partial listing — goto done/drop labels and
 * the socket lock/unlock lines are missing. */
3603 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3605 struct l2cap_chan *chan;
3607 struct l2cap_pinfo *pi;
3612 chan = l2cap_get_chan_by_scid(conn, cid);
3614 BT_DBG("unknown cid 0x%4.4x", cid);
3621 BT_DBG("chan %p, len %d", chan, skb->len);
3623 if (sk->sk_state != BT_CONNECTED)
3626 switch (chan->mode) {
3627 case L2CAP_MODE_BASIC:
3628 /* If socket recv buffers overflows we drop data here
3629 * which is *bad* because L2CAP has to be reliable.
3630 * But we don't have any other choice. L2CAP doesn't
3631 * provide flow control mechanism. */
3633 if (chan->imtu < skb->len)
3636 if (!sock_queue_rcv_skb(sk, skb))
3640 case L2CAP_MODE_ERTM:
/* Process directly if the socket isn't user-locked, else backlog it. */
3641 if (!sock_owned_by_user(sk)) {
3642 l2cap_ertm_data_rcv(sk, skb);
3644 if (sk_add_backlog(sk, skb))
3650 case L2CAP_MODE_STREAMING:
3651 control = get_unaligned_le16(skb->data);
3655 if (l2cap_check_fcs(chan, skb))
3658 if (__is_sar_start(control))
3661 if (chan->fcs == L2CAP_FCS_CRC16)
3664 if (len > chan->mps || len < 0 || __is_sframe(control))
3667 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: just resync expected_tx_seq. */
3669 if (chan->expected_tx_seq == tx_seq)
3670 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3672 chan->expected_tx_seq = (tx_seq + 1) % 64;
3674 l2cap_streaming_reassembly_sdu(chan, skb, control);
3679 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (PSM-addressed) frame to a matching socket,
 * subject to state and MTU checks. NOTE(review): partial listing. */
3693 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3697 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3703 BT_DBG("sk %p, len %d", sk, skb->len);
3705 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3708 if (l2cap_pi(sk)->chan->imtu < skb->len)
3711 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver an ATT (LE fixed-channel) frame to a matching socket; mirrors
 * l2cap_conless_channel but looks up by source CID instead of PSM. */
3723 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3727 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3733 BT_DBG("sk %p, len %d", sk, skb->len);
3735 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3738 if (l2cap_pi(sk)->chan->imtu < skb->len)
3741 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header and route by CID (signalling / connectionless / LE data / data
 * channel). NOTE(review): partial listing — drop path for the length
 * mismatch is missing. */
3753 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3755 struct l2cap_hdr *lh = (void *) skb->data;
3759 skb_pull(skb, L2CAP_HDR_SIZE);
3760 cid = __le16_to_cpu(lh->cid);
3761 len = __le16_to_cpu(lh->len);
/* Header length must match the reassembled payload exactly. */
3763 if (len != skb->len) {
3768 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3771 case L2CAP_CID_LE_SIGNALING:
3772 case L2CAP_CID_SIGNALING:
3773 l2cap_sig_channel(conn, skb);
3776 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM after the header. */
3777 psm = get_unaligned_le16(skb->data);
3779 l2cap_conless_channel(conn, psm, skb);
3782 case L2CAP_CID_LE_DATA:
3783 l2cap_att_channel(conn, cid, skb);
3787 l2cap_data_channel(conn, cid, skb);
3792 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection indication. Scan listening L2CAP
 * sockets for one bound to this adapter (exact match) or to any address,
 * and report accept/master-role link-mode bits. */
3794 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3796 int exact = 0, lm1 = 0, lm2 = 0;
3797 register struct sock *sk;
3798 struct hlist_node *node;
3800 if (type != ACL_LINK)
3803 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3805 /* Find listening sockets and check their link_mode */
3806 read_lock(&l2cap_sk_list.lock);
3807 sk_for_each(sk, node, &l2cap_sk_list.head) {
3808 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3810 if (sk->sk_state != BT_LISTEN)
/* lm1: sockets bound to this adapter's own address (exact match). */
3813 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3814 lm1 |= HCI_LM_ACCEPT;
3815 if (chan->role_switch)
3816 lm1 |= HCI_LM_MASTER;
/* lm2: wildcard-bound sockets, used only if no exact match exists. */
3818 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3819 lm2 |= HCI_LM_ACCEPT;
3820 if (chan->role_switch)
3821 lm2 |= HCI_LM_MASTER;
3824 read_unlock(&l2cap_sk_list.lock);
3826 return exact ? lm1 : lm2;
/* HCI callback: link establishment completed (ACL or LE). On success set
 * up/ready the L2CAP connection; on failure tear it down. */
3829 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3831 struct l2cap_conn *conn;
3833 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3835 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3839 conn = l2cap_conn_add(hcon, status);
3841 l2cap_conn_ready(conn);
3843 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the stored disconnect reason for this ACL link. */
3848 static int l2cap_disconn_ind(struct hci_conn *hcon)
3850 struct l2cap_conn *conn = hcon->l2cap_data;
3852 BT_DBG("hcon %p", hcon);
3854 if (hcon->type != ACL_LINK || !conn)
3857 return conn->disc_reason;
/* HCI callback: link went down — tear down the L2CAP connection,
 * translating the HCI reason into an errno via bt_err(). */
3860 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3862 BT_DBG("hcon %p reason %d", hcon, reason);
3864 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3867 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel: losing encryption
 * grants MEDIUM-security channels a 5 s grace timer but closes HIGH ones
 * immediately; regaining it clears the grace timer. */
3872 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3874 struct sock *sk = chan->sk;
3876 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3879 if (encrypt == 0x00) {
3880 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3881 l2cap_sock_clear_timer(sk);
3882 l2cap_sock_set_timer(sk, HZ * 5);
3883 } else if (chan->sec_level == BT_SECURITY_HIGH)
3884 __l2cap_sock_close(sk, ECONNREFUSED);
3886 if (chan->sec_level == BT_SECURITY_MEDIUM)
3887 l2cap_sock_clear_timer(sk);
/* HCI callback: security (authentication/encryption) procedure finished.
 * Walk every channel on the connection and advance its state machine:
 * connected channels get an encryption check; BT_CONNECT channels send
 * their pending Connection Request; BT_CONNECT2 channels answer the
 * peer's request with success or security-block.
 * NOTE(review): partial listing — per-socket bh_lock/unlock and some
 * status checks between the visible lines are missing. */
3891 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3893 struct l2cap_conn *conn = hcon->l2cap_data;
3894 struct l2cap_chan *chan;
3899 BT_DBG("conn %p", conn);
3901 read_lock(&conn->chan_lock);
3903 list_for_each_entry(chan, &conn->chan_l, list) {
3904 struct sock *sk = chan->sk;
/* Skip/special-case channels still waiting on this security procedure. */
3908 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
3913 if (!status && (sk->sk_state == BT_CONNECTED ||
3914 sk->sk_state == BT_CONFIG)) {
3915 l2cap_check_encryption(chan, encrypt);
/* Security done before connect: send the deferred L2CAP_CONN_REQ now. */
3920 if (sk->sk_state == BT_CONNECT) {
3922 struct l2cap_conn_req req;
3923 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3924 req.psm = l2cap_pi(sk)->psm;
3926 chan->ident = l2cap_get_ident(conn);
3927 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
3929 l2cap_send_cmd(conn, chan->ident,
3930 L2CAP_CONN_REQ, sizeof(req), &req);
3932 l2cap_sock_clear_timer(sk);
3933 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connect held for security: answer accept or block. */
3935 } else if (sk->sk_state == BT_CONNECT2) {
3936 struct l2cap_conn_rsp rsp;
3940 sk->sk_state = BT_CONFIG;
3941 result = L2CAP_CR_SUCCESS;
3943 sk->sk_state = BT_DISCONN;
3944 l2cap_sock_set_timer(sk, HZ / 10);
3945 result = L2CAP_CR_SEC_BLOCK;
3948 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3949 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3950 rsp.result = cpu_to_le16(result);
3951 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3952 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3959 read_unlock(&conn->chan_lock);
/* HCI callback: one ACL data packet arrived. Reassembles fragmented
 * L2CAP frames across ACL start/continuation packets in conn->rx_skb,
 * then hands complete frames to l2cap_recv_frame().
 * NOTE(review): partial listing — several goto drop/done lines and the
 * rx_skb allocation-failure branch are missing. */
3964 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3966 struct l2cap_conn *conn = hcon->l2cap_data;
3969 conn = l2cap_conn_add(hcon, 0);
3974 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3976 if (!(flags & ACL_CONT)) {
3977 struct l2cap_hdr *hdr;
3978 struct l2cap_chan *chan;
/* A new start frame while reassembly is pending: discard the old one. */
3983 BT_ERR("Unexpected start frame (len %d)", skb->len);
3984 kfree_skb(conn->rx_skb);
3985 conn->rx_skb = NULL;
3987 l2cap_conn_unreliable(conn, ECOMM);
3990 /* Start fragment always begin with Basic L2CAP header */
3991 if (skb->len < L2CAP_HDR_SIZE) {
3992 BT_ERR("Frame is too short (len %d)", skb->len);
3993 l2cap_conn_unreliable(conn, ECOMM);
3997 hdr = (struct l2cap_hdr *) skb->data;
3998 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3999 cid = __le16_to_cpu(hdr->cid);
4001 if (len == skb->len) {
4002 /* Complete frame received */
4003 l2cap_recv_frame(conn, skb);
4007 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4009 if (skb->len > len) {
4010 BT_ERR("Frame is too long (len %d, expected len %d)",
4012 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the target channel's MTU before buffering the fragments. */
4016 chan = l2cap_get_chan_by_scid(conn, cid);
4018 if (chan && chan->sk) {
4019 struct sock *sk = chan->sk;
4021 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4022 BT_ERR("Frame exceeding recv MTU (len %d, "
4026 l2cap_conn_unreliable(conn, ECOMM);
4032 /* Allocate skb for the complete frame (with header) */
4033 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4037 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many payload bytes are still expected. */
4039 conn->rx_len = len - skb->len;
4041 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4043 if (!conn->rx_len) {
4044 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4045 l2cap_conn_unreliable(conn, ECOMM);
4049 if (skb->len > conn->rx_len) {
4050 BT_ERR("Fragment is too long (len %d, expected %d)",
4051 skb->len, conn->rx_len);
4052 kfree_skb(conn->rx_skb);
4053 conn->rx_skb = NULL;
4055 l2cap_conn_unreliable(conn, ECOMM);
4059 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4061 conn->rx_len -= skb->len;
4063 if (!conn->rx_len) {
4064 /* Complete frame received */
4065 l2cap_recv_frame(conn, conn->rx_skb);
4066 conn->rx_skb = NULL;
/* seq_file show callback: dump one line per L2CAP socket (addresses,
 * state, PSM/CIDs, MTUs, security level) for the debugfs "l2cap" file. */
4075 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4078 struct hlist_node *node;
4080 read_lock_bh(&l2cap_sk_list.lock);
4082 sk_for_each(sk, node, &l2cap_sk_list.head) {
4083 struct l2cap_pinfo *pi = l2cap_pi(sk);
4084 struct l2cap_chan *chan = pi->chan;
4086 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4087 batostr(&bt_sk(sk)->src),
4088 batostr(&bt_sk(sk)->dst),
4089 sk->sk_state, __le16_to_cpu(pi->psm),
4091 chan->imtu, chan->omtu, chan->sec_level,
4095 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open callback: bind the seq_file single-show handler. */
4100 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4102 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs "l2cap" file (seq_file based).
 * NOTE(review): the .read entry is not visible in this partial listing. */
4105 static const struct file_operations l2cap_debugfs_fops = {
4106 .open = l2cap_debugfs_open,
4108 .llseek = seq_lseek,
4109 .release = single_release,
/* Dentry for the debugfs "l2cap" file; created in l2cap_init(). */
4112 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI layer's protocol
 * multiplexer; callbacks are invoked from HCI event processing. */
4114 static struct hci_proto l2cap_hci_proto = {
4116 .id = HCI_PROTO_L2CAP,
4117 .connect_ind = l2cap_connect_ind,
4118 .connect_cfm = l2cap_connect_cfm,
4119 .disconn_ind = l2cap_disconn_ind,
4120 .disconn_cfm = l2cap_disconn_cfm,
4121 .security_cfm = l2cap_security_cfm,
4122 .recv_acldata = l2cap_recv_acldata
/* Module init: register sockets, create the single-threaded busy-recovery
 * workqueue, register with HCI, and expose the debugfs file. Unwinds in
 * reverse order on failure. NOTE(review): partial listing — the error
 * labels and some return statements are missing. */
4125 int __init l2cap_init(void)
4129 err = l2cap_init_sockets();
4133 _busy_wq = create_singlethread_workqueue("l2cap");
4139 err = hci_register_proto(&l2cap_hci_proto);
4141 BT_ERR("L2CAP protocol registration failed");
4142 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
4147 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4148 bt_debugfs, NULL, &l2cap_debugfs_fops);
4150 BT_ERR("Failed to create L2CAP debug file");
4156 destroy_workqueue(_busy_wq);
4157 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init() — remove debugfs file, flush and destroy
 * the busy workqueue, unregister from HCI, clean up sockets. */
4161 void l2cap_exit(void)
4163 debugfs_remove(l2cap_debugfs);
/* Flush before destroy so no busy_work item runs after teardown. */
4165 flush_workqueue(_busy_wq);
4166 destroy_workqueue(_busy_wq);
4168 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4169 BT_ERR("L2CAP protocol unregistration failed");
4171 l2cap_cleanup_sockets();
/* Writable module parameter to force-disable ERTM (enhanced
 * retransmission mode); declared elsewhere in the file. */
4174 module_param(disable_ertm, bool, 0644);
4175 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");