2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Module-wide state: the L2CAP feature mask advertised in information
 * responses, the fixed-channel bitmap, and the global list of every
 * registered channel, guarded by a reader/writer lock. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers referenced before their
 * definitions below.
 * NOTE(review): the extraction is missing lines here (e.g. the tail of the
 * l2cap_send_cmd prototype) — verify against the full file. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking up a channel by destination CID.
 * Lock-free variant: caller must hold conn->chan_lock.
 * NOTE(review): loop body and return are missing from this extraction. */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Walk conn->chan_l looking up a channel by source CID.
 * Lock-free variant: caller must hold conn->chan_lock.
 * NOTE(review): loop body and return are missing from this extraction. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 list_for_each_entry(c, &conn->chan_l, list) {
99 /* Find channel with given SCID.
100 * Returns locked socket */
/* Locked wrapper: takes conn->chan_lock around the SCID lookup. */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
/* Find a channel on this connection by its signalling-command identifier.
 * Caller must hold conn->chan_lock.
 * NOTE(review): match/return lines are missing from this extraction. */
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
/* Locked wrapper: takes conn->chan_lock around the ident lookup. */
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for one bound to the given PSM (stored
 * in c->sport) and source address. Caller must hold chan_list_lock. */
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM under chan_list_lock. A caller-supplied PSM
 * must not already be taken for this source address; with no PSM given,
 * scan the dynamic range 0x1001..0x10ff (odd values only) for a free one.
 * NOTE(review): error paths and part of the body are missing from this
 * extraction. */
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
149 write_lock(&chan_list_lock);
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
174 write_unlock(&chan_list_lock);
/* Assign a fixed SCID to the channel; the assignment itself is on a line
 * missing from this extraction, but is performed under chan_list_lock. */
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
184 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel state, logging old -> new, and notify the
 * socket layer through the ops->state_change callback. */
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
207 chan->ops->state_change(chan->data, state);
/* Locked wrapper around __l2cap_state_change; the sk locking lines are
 * missing from this extraction — presumably lock_sock/release_sock. */
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
215 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket (sk_err assignment is on a
 * line missing from this extraction). Unlocked variant. */
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err; the sk locking lines are
 * missing from this extraction — presumably lock_sock/release_sock. */
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
231 __l2cap_chan_set_err(chan, err);
/* Delayed-work handler for the channel timer: closes the channel with a
 * reason derived from its state (ECONNREFUSED for established/connecting
 * channels), then notifies the socket layer and drops the timer's channel
 * reference. Runs with conn->chan_lock and the channel lock held around
 * the close. */
235 static void l2cap_chan_timeout(struct work_struct *work)
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
239 struct l2cap_conn *conn = chan->conn;
242 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
244 mutex_lock(&conn->chan_lock);
245 l2cap_chan_lock(chan);
247 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
248 reason = ECONNREFUSED;
249 else if (chan->state == BT_CONNECT &&
250 chan->sec_level != BT_SECURITY_SDP)
251 reason = ECONNREFUSED;
/* default reason (for other states) is set on a line missing here —
 * in mainline it is ETIMEDOUT; verify against the full file. */
255 l2cap_chan_close(chan, reason);
257 l2cap_chan_unlock(chan);
259 chan->ops->close(chan->data);
260 mutex_unlock(&conn->chan_lock);
/* Balance the reference the timer held on the channel. */
262 l2cap_chan_put(chan);
/* Allocate and initialise a new channel bound to socket @sk: zeroed
 * allocation, per-channel mutex, registration on the global channel
 * list, timeout work item, initial BT_OPEN state, refcount of 1.
 * Returns the new channel (NULL-check lines are missing from this
 * extraction). */
265 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
267 struct l2cap_chan *chan;
269 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
273 mutex_init(&chan->lock);
277 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock);
281 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
283 chan->state = BT_OPEN;
285 atomic_set(&chan->refcnt, 1);
287 BT_DBG("sk %p chan %p", sk, chan);
/* Unregister the channel from the global list and drop the creation
 * reference (frees the channel once the refcount hits zero). */
292 void l2cap_chan_destroy(struct l2cap_chan *chan)
294 write_lock(&chan_list_lock);
295 list_del(&chan->global_l);
296 write_unlock(&chan_list_lock);
298 l2cap_chan_put(chan);
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type (LE data channel, dynamically allocated CID for ACL
 * connection-oriented channels, the fixed connectionless CID, or the
 * signalling CID for raw channels), seed the best-effort flow-spec
 * defaults, take a reference, and link the channel onto conn->chan_l.
 * Caller must hold conn->chan_lock. */
301 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 __le16_to_cpu(chan->psm), chan->dcid);
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
310 switch (chan->chan_type) {
311 case L2CAP_CHAN_CONN_ORIENTED:
312 if (conn->hcon->type == LE_LINK) {
/* LE CoC not yet supported here: both ends use the fixed LE data CID. */
314 chan->omtu = L2CAP_LE_DEFAULT_MTU;
315 chan->scid = L2CAP_CID_LE_DATA;
316 chan->dcid = L2CAP_CID_LE_DATA;
318 /* Alloc CID for connection-oriented socket */
319 chan->scid = l2cap_alloc_cid(conn);
320 chan->omtu = L2CAP_DEFAULT_MTU;
324 case L2CAP_CHAN_CONN_LESS:
325 /* Connectionless socket */
326 chan->scid = L2CAP_CID_CONN_LESS;
327 chan->dcid = L2CAP_CID_CONN_LESS;
328 chan->omtu = L2CAP_DEFAULT_MTU;
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow specification: best-effort service. */
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
345 l2cap_chan_hold(chan);
347 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: attach the channel under conn->chan_lock. */
350 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan);
354 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink
 * it from conn->chan_l, drop the list's reference and the hci_conn
 * reference, move the socket to BT_CLOSED/zapped with @err recorded,
 * wake either the accept parent or the socket itself, and purge all
 * pending transmit state (including ERTM timers, srej queue and list).
 * NOTE(review): several lines (socket locking, conn NULL checks, list
 * freeing in the srej loop) are missing from this extraction. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
368 /* Delete from channel list */
369 list_del(&chan->list);
371 l2cap_chan_put(chan);
374 hci_conn_put(conn->hcon);
379 __l2cap_state_change(chan, BT_CLOSED);
380 sock_set_flag(sk, SOCK_ZAPPED);
383 __l2cap_chan_set_err(chan, err);
/* Pending-accept child: unlink from parent and wake the listener;
 * otherwise wake the socket's own state waiters. */
386 bt_accept_unlink(sk);
387 parent->sk_data_ready(parent, 0);
389 sk->sk_state_change(sk);
/* Skip the teardown of transmission state unless configuration
 * completed in both directions. */
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 skb_queue_purge(&chan->tx_q);
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
406 skb_queue_purge(&chan->srej_q);
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on a listening
 * socket: dequeue each pending sk, cancel its timer, close it with
 * ECONNRESET under the channel lock, then notify the socket layer. */
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
419 BT_DBG("parent %p", parent);
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
428 l2cap_chan_unlock(chan);
430 chan->ops->close(chan->data);
/* Close a channel according to its current state:
 *  - listening: tear down pending accepts, then mark closed/zapped;
 *  - connected/config (ACL, connection-oriented): arm the channel timer
 *    and send a Disconnect Request, else delete immediately;
 *  - connect2 (awaiting our Connect Response on ACL): answer with
 *    SEC_BLOCK (if setup was deferred) or BAD_PSM, then delete;
 *  - other states: delete, or just zap the socket.
 * NOTE(review): the switch's case labels and some locking lines are
 * missing from this extraction — the groupings above follow the visible
 * structure. */
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
442 switch (chan->state) {
445 l2cap_chan_cleanup_listen(sk);
447 __l2cap_state_change(chan, BT_CLOSED);
448 sock_set_flag(sk, SOCK_ZAPPED);
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
460 l2cap_chan_del(chan, reason);
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our dcid/scid swapped into the peer's view. */
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
483 l2cap_chan_del(chan, reason);
488 l2cap_chan_del(chan, reason);
493 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel's security level to an HCI authentication requirement:
 *  - raw channels use dedicated bonding (MITM-protected at HIGH);
 *  - PSM 0x0001 (SDP) is downgraded from LOW to the SDP level and never
 *    requires bonding;
 *  - everything else uses general bonding (MITM-protected at HIGH). */
499 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
501 if (chan->chan_type == L2CAP_CHAN_RAW) {
502 switch (chan->sec_level) {
503 case BT_SECURITY_HIGH:
504 return HCI_AT_DEDICATED_BONDING_MITM;
505 case BT_SECURITY_MEDIUM:
506 return HCI_AT_DEDICATED_BONDING;
508 return HCI_AT_NO_BONDING;
510 } else if (chan->psm == cpu_to_le16(0x0001)) {
511 if (chan->sec_level == BT_SECURITY_LOW)
512 chan->sec_level = BT_SECURITY_SDP;
514 if (chan->sec_level == BT_SECURITY_HIGH)
515 return HCI_AT_NO_BONDING_MITM;
517 return HCI_AT_NO_BONDING;
519 switch (chan->sec_level) {
520 case BT_SECURITY_HIGH:
521 return HCI_AT_GENERAL_BONDING_MITM;
522 case BT_SECURITY_MEDIUM:
523 return HCI_AT_GENERAL_BONDING;
525 return HCI_AT_NO_BONDING;
530 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * derived authentication requirement; returns the hci_conn_security
 * result. */
531 int l2cap_chan_check_security(struct l2cap_chan *chan)
533 struct l2cap_conn *conn = chan->conn;
536 auth_type = l2cap_get_auth_type(chan);
538 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier under conn->lock,
 * wrapping within the kernel's 1-128 range (129-254 belong to reserved
 * ranges and userspace utilities). The wrap assignment is on a line
 * missing from this extraction. */
541 static u8 l2cap_get_ident(struct l2cap_conn *conn)
545 /* Get next available identificator.
546 * 1 - 128 are used by kernel.
547 * 129 - 199 are reserved.
548 * 200 - 254 are used by utilities like l2ping, etc.
551 spin_lock(&conn->lock);
553 if (++conn->tx_ident > 128)
558 spin_unlock(&conn->lock);
/* Build a signalling command skb and transmit it on the connection's
 * HCI channel at maximum priority, forcing the link active and using
 * non-flushable ACL start when the controller supports it. */
563 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
565 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
568 BT_DBG("code 0x%2.2x", code);
573 if (lmp_no_flush_capable(conn->hcon->hdev))
574 flags = ACL_START_NO_FLUSH;
578 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
579 skb->priority = HCI_PRIO_MAX;
581 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for the channel over ACL, choosing non-flushable
 * start when the channel is not flagged flushable and the controller
 * supports it, and propagating the channel's force-active policy. */
584 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
586 struct hci_conn *hcon = chan->conn->hcon;
589 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
592 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
593 lmp_no_flush_capable(hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
598 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
599 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame carrying @control. Picks the
 * enhanced or extended control-field header size, appends an FCS when
 * CRC16 checking is negotiated, folds in the pending final/poll bits,
 * and hands the frame to l2cap_do_send at maximum priority. Only valid
 * while the channel is BT_CONNECTED. */
602 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
605 struct l2cap_hdr *lh;
606 struct l2cap_conn *conn = chan->conn;
609 if (chan->state != BT_CONNECTED)
612 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
613 hlen = L2CAP_EXT_HDR_SIZE;
615 hlen = L2CAP_ENH_HDR_SIZE;
617 if (chan->fcs == L2CAP_FCS_CRC16)
618 hlen += L2CAP_FCS_SIZE;
620 BT_DBG("chan %p, control 0x%8.8x", chan, control);
622 count = min_t(unsigned int, conn->mtu, hlen);
624 control |= __set_sframe(chan);
626 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
627 control |= __set_ctrl_final(chan);
629 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
630 control |= __set_ctrl_poll(chan);
632 skb = bt_skb_alloc(count, GFP_ATOMIC);
636 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
637 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
638 lh->cid = cpu_to_le16(chan->dcid);
640 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
642 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control, i.e. everything before the FCS field. */
643 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
644 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
647 skb->priority = HCI_PRIO_MAX;
648 l2cap_do_send(chan, skb);
/* Send an RR (Receiver Ready) S-frame, or RNR (Receiver Not Ready) if
 * the local side is busy, acknowledging up to chan->buffer_seq. */
651 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
655 set_bit(CONN_RNR_SENT, &chan->conn_state);
657 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
659 control |= __set_reqseq(chan, chan->buffer_seq);
661 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
664 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
666 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connect Request for this channel with a freshly
 * allocated identifier, marking the connect as pending. (The req.psm
 * assignment is on a line missing from this extraction.) */
669 static void l2cap_send_conn_req(struct l2cap_chan *chan)
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_req req;
674 req.scid = cpu_to_le16(chan->scid);
677 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Kick off channel establishment. If the feature-mask exchange has
 * already been started, wait for it to finish, then send a Connect
 * Request once security allows and none is pending. Otherwise start
 * the exchange with an Information Request and arm the info timer. */
684 static void l2cap_do_start(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
692 if (l2cap_chan_check_security(chan) &&
693 __l2cap_no_conn_pending(chan))
694 l2cap_send_conn_req(chan);
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask. Returns non-zero if so.
 * NOTE(review): the condition gating the local-mask extension and the
 * default case are on lines missing from this extraction. */
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 u32 local_feat_mask = l2cap_feat_mask;
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan, first silencing all ERTM
 * timers, then move the channel to BT_DISCONN with @err recorded on the
 * socket. (The socket locking around the state change is on lines
 * missing from this extraction.) */
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
727 struct sock *sk = chan->sk;
728 struct l2cap_disconn_req req;
733 if (chan->mode == L2CAP_MODE_ERTM) {
734 __clear_retrans_timer(chan);
735 __clear_monitor_timer(chan);
736 __clear_ack_timer(chan);
739 req.dcid = cpu_to_le16(chan->dcid);
740 req.scid = cpu_to_le16(chan->scid);
741 l2cap_send_cmd(conn, l2cap_get_ident(conn),
742 L2CAP_DISCONN_REQ, sizeof(req), &req);
745 __l2cap_state_change(chan, BT_DISCONN);
746 __l2cap_chan_set_err(chan, err);
750 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward once the
 * feature exchange completes:
 *  - BT_CONNECT channels send a Connect Request (or are closed if the
 *    negotiated mode is unsupported and the device is state-2);
 *  - BT_CONNECT2 channels answer the peer's Connect Request — success,
 *    authorization-pending (deferred setup), or authentication-pending —
 *    and on success immediately send the first Configure Request.
 * Iterates under conn->chan_lock with each channel individually locked. */
751 static void l2cap_conn_start(struct l2cap_conn *conn)
753 struct l2cap_chan *chan, *tmp;
755 BT_DBG("conn %p", conn);
757 mutex_lock(&conn->chan_lock);
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
762 l2cap_chan_lock(chan);
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
765 l2cap_chan_unlock(chan);
769 if (chan->state == BT_CONNECT) {
770 if (!l2cap_chan_check_security(chan) ||
771 !__l2cap_no_conn_pending(chan)) {
772 l2cap_chan_unlock(chan);
776 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
777 && test_bit(CONF_STATE2_DEVICE,
778 &chan->conf_state)) {
779 l2cap_chan_close(chan, ECONNRESET);
780 l2cap_chan_unlock(chan);
784 l2cap_send_conn_req(chan);
786 } else if (chan->state == BT_CONNECT2) {
787 struct l2cap_conn_rsp rsp;
789 rsp.scid = cpu_to_le16(chan->dcid);
790 rsp.dcid = cpu_to_le16(chan->scid);
792 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can accept/authorize. */
799 parent->sk_data_ready(parent, 0);
802 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the Configure Request once per channel and only on
 * a successful connect response. */
815 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
816 rsp.result != L2CAP_CR_SUCCESS) {
817 l2cap_chan_unlock(chan);
821 set_bit(CONF_REQ_SENT, &chan->conf_state);
822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
823 l2cap_build_conf_req(chan, buf), buf);
824 chan->num_conf_req++;
827 l2cap_chan_unlock(chan);
830 mutex_unlock(&conn->chan_lock);
833 /* Find socket with cid and source bdaddr.
834 * Returns closest match, locked.
/* Global lookup by SCID and state: an exact source-address match returns
 * immediately; a BDADDR_ANY binding is remembered as the closest match
 * (returned via a path on lines missing from this extraction). */
836 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
839 struct l2cap_chan *c, *c1 = NULL;
841 read_lock(&chan_list_lock);
843 list_for_each_entry(c, &chan_list, global_l) {
844 struct sock *sk = c->sk;
846 if (state && c->state != state)
849 if (c->scid == cid) {
851 if (!bacmp(&bt_sk(sk)->src, src)) {
852 read_unlock(&chan_list_lock);
857 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
862 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data CID,
 * reject if its accept backlog is full, spawn a child channel via
 * ops->new_connection, copy the link addresses into the child socket,
 * enqueue it for accept(), attach it to the connection, and mark it
 * connected. NOTE(review): the parent locking and error paths are on
 * lines missing from this extraction. */
867 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
869 struct sock *parent, *sk;
870 struct l2cap_chan *chan, *pchan;
874 /* Check if we have socket listening on cid */
875 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
884 /* Check for backlog size */
885 if (sk_acceptq_is_full(parent)) {
886 BT_DBG("backlog full %d", parent->sk_ack_backlog);
890 chan = pchan->ops->new_connection(pchan->data);
896 hci_conn_hold(conn->hcon);
898 bacpy(&bt_sk(sk)->src, conn->src);
899 bacpy(&bt_sk(sk)->dst, conn->dst);
901 bt_accept_enqueue(parent, sk);
903 l2cap_chan_add(conn, chan);
905 __set_chan_timer(chan, sk->sk_sndtimeo);
907 __l2cap_state_change(chan, BT_CONNECTED);
908 parent->sk_data_ready(parent, 0);
911 release_sock(parent);
/* Mark a channel fully established: reset configuration state, cancel
 * the channel timer, move to BT_CONNECTED, and wake the socket — plus
 * the accept parent if this was an incoming channel. */
914 static void l2cap_chan_ready(struct l2cap_chan *chan)
916 struct sock *sk = chan->sk;
921 parent = bt_sk(sk)->parent;
923 BT_DBG("sk %p, parent %p", sk, parent);
925 chan->conf_state = 0;
926 __clear_chan_timer(chan);
928 __l2cap_state_change(chan, BT_CONNECTED);
929 sk->sk_state_change(sk);
932 parent->sk_data_ready(parent, 0);
/* Called when the underlying link becomes ready. For LE: handle the
 * incoming-connection case and, for outgoing links, start SMP security.
 * Then walk every channel: LE channels become ready once SMP security
 * is satisfied, non-connection-oriented channels are marked connected
 * immediately, and BT_CONNECT channels continue establishment. */
937 static void l2cap_conn_ready(struct l2cap_conn *conn)
939 struct l2cap_chan *chan;
941 BT_DBG("conn %p", conn);
943 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
944 l2cap_le_conn_ready(conn);
946 if (conn->hcon->out && conn->hcon->type == LE_LINK)
947 smp_conn_security(conn, conn->hcon->pending_sec_level)
949 mutex_lock(&conn->chan_lock);
951 list_for_each_entry(chan, &conn->chan_l, list) {
953 l2cap_chan_lock(chan);
955 if (conn->hcon->type == LE_LINK) {
956 if (smp_conn_security(conn, chan->sec_level))
957 l2cap_chan_ready(chan);
959 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
960 struct sock *sk = chan->sk;
961 __clear_chan_timer(chan);
963 __l2cap_state_change(chan, BT_CONNECTED);
964 sk->sk_state_change(sk);
967 } else if (chan->state == BT_CONNECT)
968 l2cap_do_start(chan);
970 l2cap_chan_unlock(chan);
973 mutex_unlock(&conn->chan_lock);
976 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel on @conn that requested forced
 * reliability, under conn->chan_lock. */
977 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
979 struct l2cap_chan *chan;
981 BT_DBG("conn %p", conn);
983 mutex_lock(&conn->chan_lock);
985 list_for_each_entry(chan, &conn->chan_l, list) {
986 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
987 __l2cap_chan_set_err(chan, err);
990 mutex_unlock(&conn->chan_lock);
/* Info-timer expiry: give up waiting for the peer's Information
 * Response, mark the feature exchange done, and resume channel
 * establishment. */
993 static void l2cap_info_timeout(struct work_struct *work)
995 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
998 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
999 conn->info_ident = 0;
1001 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: free any partial
 * reassembly skb, delete every channel with @err (notifying the socket
 * layer), release the HCI channel, cancel the info timer if the feature
 * exchange was in flight, destroy pending SMP state, and detach the
 * l2cap_data pointer. (The final kfree of conn is on a line missing
 * from this extraction.) */
1004 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1006 struct l2cap_conn *conn = hcon->l2cap_data;
1007 struct l2cap_chan *chan, *l;
1012 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1014 kfree_skb(conn->rx_skb);
1016 mutex_lock(&conn->chan_lock);
1019 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1020 l2cap_chan_lock(chan);
1022 l2cap_chan_del(chan, err);
1024 l2cap_chan_unlock(chan);
1026 chan->ops->close(chan->data);
1029 mutex_unlock(&conn->chan_lock);
1031 hci_chan_del(conn->hchan);
1033 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1034 cancel_delayed_work_sync(&conn->info_timer);
1036 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1037 cancel_delayed_work_sync(&conn->security_timer);
1038 smp_chan_destroy(conn);
1041 hcon->l2cap_data = NULL;
/* SMP security-procedure timeout: drop the whole connection with
 * ETIMEDOUT. */
1045 static void security_timeout(struct work_struct *work)
1047 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1048 security_timer.work);
1050 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel and the conn structure, pick the MTU from the controller's LE
 * or ACL buffer size, record source/destination addresses, initialise
 * locks and the channel list, and arm either the SMP security timer
 * (LE) or the info timer (BR/EDR). NOTE(review): the early-return for
 * an existing conn and NULL checks are on lines missing from this
 * extraction. */
1053 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1055 struct l2cap_conn *conn = hcon->l2cap_data;
1056 struct hci_chan *hchan;
1061 hchan = hci_chan_create(hcon);
1065 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1067 hci_chan_del(hchan);
1071 hcon->l2cap_data = conn;
1073 conn->hchan = hchan;
1075 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1077 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1078 conn->mtu = hcon->hdev->le_mtu;
1080 conn->mtu = hcon->hdev->acl_mtu;
1082 conn->src = &hcon->hdev->bdaddr;
1083 conn->dst = &hcon->dst;
1085 conn->feat_mask = 0;
1087 spin_lock_init(&conn->lock);
1088 mutex_init(&conn->chan_lock);
1090 INIT_LIST_HEAD(&conn->chan_l);
1092 if (hcon->type == LE_LINK)
1093 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1095 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1097 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1102 /* ---- Socket interface ---- */
1104 /* Find socket with psm and source bdaddr.
1105 * Returns closest match.
/* Global lookup by PSM and state: an exact source-address match returns
 * immediately; a BDADDR_ANY binding is remembered as the closest match
 * (returned via a path on lines missing from this extraction). */
1107 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1109 struct l2cap_chan *c, *c1 = NULL;
1111 read_lock(&chan_list_lock);
1113 list_for_each_entry(c, &chan_list, global_l) {
1114 struct sock *sk = c->sk;
1116 if (state && c->state != state)
1119 if (c->psm == psm) {
1121 if (!bacmp(&bt_sk(sk)->src, src)) {
1122 read_unlock(&chan_list_lock);
1127 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1132 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection for @chan to @dst on @psm/@cid.
 * Validates the PSM (odd, low bit of upper byte clear) and channel mode,
 * resolves a route to an HCI device, creates or reuses the ACL/LE link
 * with the required security, attaches the channel to the connection,
 * and either completes immediately (link already up) or arms the channel
 * timer and waits for connection establishment.
 * NOTE(review): several error-path and hci_dev locking lines are missing
 * from this extraction; the goto targets are not visible. */
1137 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1139 struct sock *sk = chan->sk;
1140 bdaddr_t *src = &bt_sk(sk)->src;
1141 struct l2cap_conn *conn;
1142 struct hci_conn *hcon;
1143 struct hci_dev *hdev;
1147 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1148 __le16_to_cpu(chan->psm));
1150 hdev = hci_get_route(dst, src);
1152 return -EHOSTUNREACH;
1156 l2cap_chan_lock(chan);
1158 /* PSM must be odd and lsb of upper byte must be 0 */
1159 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1160 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1165 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1170 switch (chan->mode) {
1171 case L2CAP_MODE_BASIC:
1173 case L2CAP_MODE_ERTM:
1174 case L2CAP_MODE_STREAMING:
1185 switch (sk->sk_state) {
1189 /* Already connecting */
1195 /* Already connected */
1211 /* Set destination address and psm */
1212 bacpy(&bt_sk(sk)->dst, dst);
1219 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else uses ACL. */
1221 if (chan->dcid == L2CAP_CID_LE_DATA)
1222 hcon = hci_connect(hdev, LE_LINK, dst,
1223 chan->sec_level, auth_type);
1225 hcon = hci_connect(hdev, ACL_LINK, dst,
1226 chan->sec_level, auth_type);
1229 err = PTR_ERR(hcon);
1233 conn = l2cap_conn_add(hcon, 0);
1240 /* Update source addr of the socket */
1241 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to respect lock ordering. */
1243 l2cap_chan_unlock(chan);
1244 l2cap_chan_add(conn, chan);
1245 l2cap_chan_lock(chan);
1247 l2cap_state_change(chan, BT_CONNECT);
1248 __set_chan_timer(chan, sk->sk_sndtimeo);
1250 if (hcon->state == BT_CONNECTED) {
1251 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1252 __clear_chan_timer(chan);
1253 if (l2cap_chan_check_security(chan))
1254 l2cap_state_change(chan, BT_CONNECTED);
1256 l2cap_do_start(chan);
1262 l2cap_chan_unlock(chan);
1263 hci_dev_unlock(hdev);
/* Block (interruptibly) until all ERTM frames have been acknowledged or
 * the connection goes away, honouring pending signals and socket errors.
 * NOTE(review): the timeout initialisation and some loop-exit lines are
 * missing from this extraction. */
1268 int __l2cap_wait_ack(struct sock *sk)
1270 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1271 DECLARE_WAITQUEUE(wait, current);
1275 add_wait_queue(sk_sleep(sk), &wait);
1276 set_current_state(TASK_INTERRUPTIBLE);
1277 while (chan->unacked_frames > 0 && chan->conn) {
1281 if (signal_pending(current)) {
1282 err = sock_intr_errno(timeo);
1287 timeo = schedule_timeout(timeo);
1289 set_current_state(TASK_INTERRUPTIBLE);
1291 err = sock_error(sk);
1295 set_current_state(TASK_RUNNING);
1296 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the retry budget is exhausted, disconnect with
 * ECONNABORTED; otherwise bump the retry count, re-arm the monitor
 * timer, and poll the peer with RR/RNR. Always drops the reference the
 * timer held on the channel. */
1300 static void l2cap_monitor_timeout(struct work_struct *work)
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 monitor_timer.work);
1305 BT_DBG("chan %p", chan);
1307 l2cap_chan_lock(chan);
1309 if (chan->retry_count >= chan->remote_max_tx) {
1310 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1311 l2cap_chan_unlock(chan);
1312 l2cap_chan_put(chan);
1316 chan->retry_count++;
1317 __set_monitor_timer(chan);
1319 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1320 l2cap_chan_unlock(chan);
1321 l2cap_chan_put(chan);
/* ERTM retransmission timer: start the poll/monitor sequence — reset
 * the retry count, arm the monitor timer, flag that we are waiting for
 * an F-bit, and poll the peer with RR/RNR. Drops the timer's channel
 * reference. */
1324 static void l2cap_retrans_timeout(struct work_struct *work)
1326 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1327 retrans_timer.work);
1329 BT_DBG("chan %p", chan);
1331 l2cap_chan_lock(chan);
1333 chan->retry_count = 1;
1334 __set_monitor_timer(chan);
1336 set_bit(CONN_WAIT_F, &chan->conn_state);
1338 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1340 l2cap_chan_unlock(chan);
1341 l2cap_chan_put(chan);
/* Free transmitted frames from the head of tx_q up to (not including)
 * expected_ack_seq, decrementing unacked_frames; stop the retransmit
 * timer once everything is acknowledged. (The kfree_skb of each dequeued
 * frame is on a line missing from this extraction.) */
1344 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1346 struct sk_buff *skb;
1348 while ((skb = skb_peek(&chan->tx_q)) &&
1349 chan->unacked_frames) {
1350 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1353 skb = skb_dequeue(&chan->tx_q);
1356 chan->unacked_frames--;
1359 if (!chan->unacked_frames)
1360 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each frame's control
 * field with the next TxSeq, recomputing the CRC16 FCS in place when
 * enabled, and sending — no retransmission state is kept. */
1363 static void l2cap_streaming_send(struct l2cap_chan *chan)
1365 struct sk_buff *skb;
1369 while ((skb = skb_dequeue(&chan->tx_q))) {
1370 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1371 control |= __set_txseq(chan, chan->next_tx_seq);
1372 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1374 if (chan->fcs == L2CAP_FCS_CRC16) {
1375 fcs = crc16(0, (u8 *)skb->data,
1376 skb->len - L2CAP_FCS_SIZE);
1377 put_unaligned_le16(fcs,
1378 skb->data + skb->len - L2CAP_FCS_SIZE);
1381 l2cap_do_send(chan, skb);
1383 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence @tx_seq: locate it in
 * tx_q, give up with a disconnect if remote_max_tx retries are already
 * spent, clone it, rebuild the control field (keeping only the SAR
 * bits, adding pending F-bit, current ReqSeq and the original TxSeq),
 * refresh the FCS, and send the clone. */
1387 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1389 struct sk_buff *skb, *tx_skb;
1393 skb = skb_peek(&chan->tx_q);
1397 while (bt_cb(skb)->tx_seq != tx_seq) {
1398 if (skb_queue_is_last(&chan->tx_q, skb))
1401 skb = skb_queue_next(&chan->tx_q, skb);
1404 if (chan->remote_max_tx &&
1405 bt_cb(skb)->retries == chan->remote_max_tx) {
1406 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1410 tx_skb = skb_clone(skb, GFP_ATOMIC);
1411 bt_cb(skb)->retries++;
1413 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1414 control &= __get_sar_mask(chan);
1416 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1417 control |= __set_ctrl_final(chan);
1419 control |= __set_reqseq(chan, chan->buffer_seq);
1420 control |= __set_txseq(chan, tx_seq);
1422 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1424 if (chan->fcs == L2CAP_FCS_CRC16) {
1425 fcs = crc16(0, (u8 *)tx_skb->data,
1426 tx_skb->len - L2CAP_FCS_SIZE);
1427 put_unaligned_le16(fcs,
1428 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1431 l2cap_do_send(chan, tx_skb);
/* ERTM transmit loop: while the tx window has room, clone the next
 * frame at tx_send_head, stamp control (SAR bits preserved, pending
 * F-bit, ReqSeq, next TxSeq), refresh the FCS, send, arm the retransmit
 * timer, record the frame's TxSeq, and advance the window bookkeeping
 * (unacked_frames on first transmission, frames_sent, tx_send_head).
 * Gives up with a disconnect when remote_max_tx retries are exhausted.
 * NOTE(review): the FCS at lines 1468-1471 is computed over skb->data
 * (the original) rather than tx_skb->data (the clone); because the clone
 * shares its data buffer with the original this is equivalent, but
 * verify against the mainline source. The return value lines are
 * missing from this extraction. */
1434 static int l2cap_ertm_send(struct l2cap_chan *chan)
1436 struct sk_buff *skb, *tx_skb;
1441 if (chan->state != BT_CONNECTED)
1444 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1446 if (chan->remote_max_tx &&
1447 bt_cb(skb)->retries == chan->remote_max_tx) {
1448 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1452 tx_skb = skb_clone(skb, GFP_ATOMIC);
1454 bt_cb(skb)->retries++;
1456 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1457 control &= __get_sar_mask(chan);
1459 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1460 control |= __set_ctrl_final(chan);
1462 control |= __set_reqseq(chan, chan->buffer_seq);
1463 control |= __set_txseq(chan, chan->next_tx_seq);
1465 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1467 if (chan->fcs == L2CAP_FCS_CRC16) {
1468 fcs = crc16(0, (u8 *)skb->data,
1469 tx_skb->len - L2CAP_FCS_SIZE);
1470 put_unaligned_le16(fcs, skb->data +
1471 tx_skb->len - L2CAP_FCS_SIZE);
1474 l2cap_do_send(chan, tx_skb);
1476 __set_retrans_timer(chan);
1478 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1480 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1482 if (bt_cb(skb)->retries == 1) {
1483 chan->unacked_frames++;
1486 __clear_ack_timer(chan);
1489 chan->frames_sent++;
1491 if (skb_queue_is_last(&chan->tx_q, skb))
1492 chan->tx_send_head = NULL;
1494 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of tx_q and next_tx_seq to the
 * last acknowledged sequence, then re-run the ERTM send loop — i.e.
 * retransmit everything outstanding. */
1500 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1504 if (!skb_queue_empty(&chan->tx_q))
1505 chan->tx_send_head = chan->tx_q.next;
1507 chan->next_tx_seq = chan->expected_ack_seq;
1508 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggback the ack on pending I-frames via l2cap_ertm_send and
 * fall back to an explicit RR S-frame when nothing was sent. */
1512 static void __l2cap_send_ack(struct l2cap_chan *chan)
1516 control |= __set_reqseq(chan, chan->buffer_seq);
1518 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1519 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1520 set_bit(CONN_RNR_SENT, &chan->conn_state);
1521 l2cap_send_sframe(chan, control);
1525 if (l2cap_ertm_send(chan) > 0)
1528 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1529 l2cap_send_sframe(chan, control);
/* Cancel the delayed-ack timer and send the acknowledgement now. */
1532 static void l2cap_send_ack(struct l2cap_chan *chan)
1534 __clear_ack_timer(chan);
1535 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number of the last (tail) entry on the srej list. */
1538 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1540 struct srej_list *tail;
1543 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1544 control |= __set_ctrl_final(chan);
1546 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1547 control |= __set_reqseq(chan, tail->tx_seq);
1549 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, and the remainder is split into
 * MTU-sized continuation fragments allocated via ops->alloc_skb and
 * chained onto the frag_list, inheriting the head skb's priority.
 * NOTE(review): error returns and the loop-bound bookkeeping are on
 * lines missing from this extraction. */
1552 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1553 struct msghdr *msg, int len,
1554 int count, struct sk_buff *skb)
1556 struct l2cap_conn *conn = chan->conn;
1557 struct sk_buff **frag;
1560 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1566 /* Continuation fragments (no L2CAP header) */
1567 frag = &skb_shinfo(skb)->frag_list;
1569 count = min_t(unsigned int, conn->mtu, len);
1571 *frag = chan->ops->alloc_skb(chan, count,
1572 msg->msg_flags & MSG_DONTWAIT,
1577 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1580 (*frag)->priority = skb->priority;
1585 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from user data.
 *
 * Header layout: basic L2CAP header (length, dcid) followed by the 2-byte
 * PSM, then the payload copied from `msg` by l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1591 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1592 struct msghdr *msg, size_t len,
1595 struct l2cap_conn *conn = chan->conn;
1596 struct sk_buff *skb;
1597 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1598 struct l2cap_hdr *lh;
1600 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Linear part limited by the HCI connection MTU; rest goes to frag_list. */
1602 count = min_t(unsigned int, (conn->mtu - hlen), len);
1604 skb = chan->ops->alloc_skb(chan, count + hlen,
1605 msg->msg_flags & MSG_DONTWAIT, &err);
1608 return ERR_PTR(err);
1610 skb->priority = priority;
1612 /* Create L2CAP header */
1613 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1614 lh->cid = cpu_to_le16(chan->dcid);
/* lh->len counts payload + PSM, but not the basic header itself. */
1615 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1616 put_unaligned(chan->psm, skb_put(skb, 2));
1618 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1619 if (unlikely(err < 0)) {
1621 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload, no control/SAR/FCS fields.  Returns the skb or ERR_PTR. */
1626 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1627 struct msghdr *msg, size_t len,
1630 struct l2cap_conn *conn = chan->conn;
1631 struct sk_buff *skb;
1632 int err, count, hlen = L2CAP_HDR_SIZE;
1633 struct l2cap_hdr *lh;
1635 BT_DBG("chan %p len %d", chan, (int)len);
/* Linear part limited by the HCI connection MTU; rest goes to frag_list. */
1637 count = min_t(unsigned int, (conn->mtu - hlen), len);
1639 skb = chan->ops->alloc_skb(chan, count + hlen,
1640 msg->msg_flags & MSG_DONTWAIT, &err);
1643 return ERR_PTR(err);
1645 skb->priority = priority;
1647 /* Create L2CAP header */
1648 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1649 lh->cid = cpu_to_le16(chan->dcid);
1650 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1652 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1653 if (unlikely(err < 0)) {
1655 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, enhanced or extended control field (depending on
 * FLAG_EXT_CTRL), optional 2-byte SDU length (`sdulen` != 0, i.e. the
 * first segment of a segmented SDU), payload, and a zeroed 2-byte FCS
 * placeholder when CRC16 is negotiated (filled in at transmit time).
 * Returns the skb or ERR_PTR; -ENOTCONN if the channel has no connection
 * (that check's condition line is elided in this extract). */
1660 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1661 struct msghdr *msg, size_t len,
1662 u32 control, u16 sdulen)
1664 struct l2cap_conn *conn = chan->conn;
1665 struct sk_buff *skb;
1666 int err, count, hlen;
1667 struct l2cap_hdr *lh;
1669 BT_DBG("chan %p len %d", chan, (int)len);
1672 return ERR_PTR(-ENOTCONN);
/* Extended control field is 4 bytes, enhanced is 2. */
1674 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1675 hlen = L2CAP_EXT_HDR_SIZE;
1677 hlen = L2CAP_ENH_HDR_SIZE;
1680 hlen += L2CAP_SDULEN_SIZE;
1682 if (chan->fcs == L2CAP_FCS_CRC16)
1683 hlen += L2CAP_FCS_SIZE;
1685 count = min_t(unsigned int, (conn->mtu - hlen), len);
1687 skb = chan->ops->alloc_skb(chan, count + hlen,
1688 msg->msg_flags & MSG_DONTWAIT, &err);
1691 return ERR_PTR(err);
1693 /* Create L2CAP header */
1694 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1695 lh->cid = cpu_to_le16(chan->dcid);
1696 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1698 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1701 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1703 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1704 if (unlikely(err < 0)) {
1706 return ERR_PTR(err);
/* FCS placeholder; the real CRC is computed when the frame is sent. */
1709 if (chan->fcs == L2CAP_FCS_CRC16)
1710 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1712 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the peer's MPS into a chain of I-frames:
 * a START frame carrying the total SDU length, zero or more CONTINUE
 * frames, and an END frame.  Frames are staged on a local queue and only
 * spliced onto chan->tx_q once the whole SDU was built, so a mid-SDU
 * allocation failure never leaves a partial SDU on the transmit queue.
 * (Partial listing: the loop header and final return are elided.) */
1716 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1718 struct sk_buff *skb;
1719 struct sk_buff_head sar_queue;
1723 skb_queue_head_init(&sar_queue);
1724 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
/* START segment: sdulen argument carries the full SDU length. */
1725 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1727 return PTR_ERR(skb);
1729 __skb_queue_tail(&sar_queue, skb);
1730 len -= chan->remote_mps;
1731 size += chan->remote_mps;
1736 if (len > chan->remote_mps) {
1737 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1738 buflen = chan->remote_mps;
1740 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* Non-START segments carry no SDU length (sdulen = 0). */
1744 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1746 skb_queue_purge(&sar_queue);
1747 return PTR_ERR(skb);
1750 __skb_queue_tail(&sar_queue, skb);
1754 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1755 if (chan->tx_send_head == NULL)
1756 chan->tx_send_head = sar_queue.next;
/* Top-level channel transmit entry point, dispatching on channel type and
 * mode:
 *  - connectionless channels send a single G-frame immediately;
 *  - basic mode checks the outgoing MTU and sends one B-frame;
 *  - ERTM/streaming either builds one unsegmented I-frame (SDU fits in
 *    remote_mps) or segments via l2cap_sar_segment_sdu(), then triggers
 *    streaming or ERTM transmission (suppressed while the remote is busy
 *    or we are waiting for an F-bit response).
 * (Partial listing: err assignments, break/goto lines and the final
 * return are elided.) */
1761 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1764 struct sk_buff *skb;
1768 /* Connectionless channel */
1769 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1770 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1772 return PTR_ERR(skb);
1774 l2cap_do_send(chan, skb);
1778 switch (chan->mode) {
1779 case L2CAP_MODE_BASIC:
1780 /* Check outgoing MTU */
1781 if (len > chan->omtu)
1784 /* Create a basic PDU */
1785 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1787 return PTR_ERR(skb);
1789 l2cap_do_send(chan, skb);
1793 case L2CAP_MODE_ERTM:
1794 case L2CAP_MODE_STREAMING:
1795 /* Entire SDU fits into one PDU */
1796 if (len <= chan->remote_mps) {
1797 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1798 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1801 return PTR_ERR(skb);
1803 __skb_queue_tail(&chan->tx_q, skb);
1805 if (chan->tx_send_head == NULL)
1806 chan->tx_send_head = skb;
1809 /* Segment SDU into multiples PDUs */
1810 err = l2cap_sar_segment_sdu(chan, msg, len);
1815 if (chan->mode == L2CAP_MODE_STREAMING) {
1816 l2cap_streaming_send(chan);
/* Hold off ERTM sends while remote is busy and an F-bit is pending. */
1821 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1822 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1827 err = l2cap_ertm_send(chan);
1834 BT_DBG("bad state %1.1x", chan->mode);
1841 /* Copy frame to all raw sockets on that connection */
/* Walk the connection's channel list under chan_lock and deliver a clone
 * of `skb` to every raw channel's recv op (GFP_ATOMIC clone; delivery is
 * skipped when cloning fails or recv rejects it — those branch targets
 * are elided in this extract).  The originating socket is excluded, per
 * the comment below; the actual comparison line is elided. */
1842 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1844 struct sk_buff *nskb;
1845 struct l2cap_chan *chan;
1847 BT_DBG("conn %p", conn);
1849 mutex_lock(&conn->chan_lock);
1851 list_for_each_entry(chan, &conn->chan_l, list) {
1852 struct sock *sk = chan->sk;
1853 if (chan->chan_type != L2CAP_CHAN_RAW)
1856 /* Don't send frame to the socket it came from */
1859 nskb = skb_clone(skb, GFP_ATOMIC);
1863 if (chan->ops->recv(chan->data, nskb))
1867 mutex_unlock(&conn->chan_lock);
1870 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header addressed to the (LE or
 * BR/EDR) signalling CID, command header (code/ident/len), and `dlen`
 * bytes of `data`.  Data beyond the first conn->mtu-sized chunk is placed
 * in continuation fragments on frag_list, mirroring
 * l2cap_skbuff_fromiovec().  Returns NULL on allocation failure (those
 * checks are elided in this extract). */
1871 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1872 u8 code, u8 ident, u16 dlen, void *data)
1874 struct sk_buff *skb, **frag;
1875 struct l2cap_cmd_hdr *cmd;
1876 struct l2cap_hdr *lh;
1879 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1880 conn, code, ident, dlen);
1882 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1883 count = min_t(unsigned int, conn->mtu, len);
1885 skb = bt_skb_alloc(count, GFP_ATOMIC);
1889 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1890 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1892 if (conn->hcon->type == LE_LINK)
1893 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1895 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1897 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1900 cmd->len = cpu_to_le16(dlen);
/* First chunk: whatever fits after the two headers. */
1903 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1904 memcpy(skb_put(skb, count), data, count);
1910 /* Continuation fragments (no L2CAP header) */
1911 frag = &skb_shinfo(skb)->frag_list;
1913 count = min_t(unsigned int, conn->mtu, len);
1915 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1919 memcpy(skb_put(*frag, count), data, count);
1924 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length and
 * load its value into *val — 1/2/4-byte options are read by value (the
 * olen switch cases are elided here); any other length returns a pointer
 * to the raw bytes cast into *val.  Returns the total option size so the
 * caller can advance through the option list. */
1934 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1936 struct l2cap_conf_opt *opt = *ptr;
1939 len = L2CAP_CONF_OPT_SIZE + opt->len;
1947 *val = *((u8 *) opt->val);
1951 *val = get_unaligned_le16(opt->val);
1955 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the payload. */
1959 *val = (unsigned long) opt->val;
1963 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr — 1/2/4-byte values are stored
 * little-endian (the len switch cases are elided here); any other length
 * treats `val` as a pointer and memcpy's `len` bytes.  Advances *ptr past
 * the option.  Caller is responsible for buffer space. */
1967 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1969 struct l2cap_conf_opt *opt = *ptr;
1971 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1978 *((u8 *) opt->val) = val;
1982 put_unaligned_le16(val, opt->val);
1986 put_unaligned_le32(val, opt->val);
/* Variable-length option: val is really a pointer to the payload. */
1990 memcpy(opt->val, (void *) val, len);
1994 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification from the channel's local QoS
 * parameters and append it as an EFS config option.  ERTM uses the
 * channel's own service type and latency/flush defaults; streaming mode
 * forces best-effort (its id/acc_lat/flush_to assignments are elided in
 * this extract, as is the default case). */
1997 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1999 struct l2cap_conf_efs efs;
2001 switch (chan->mode) {
2002 case L2CAP_MODE_ERTM:
2003 efs.id = chan->local_id;
2004 efs.stype = chan->local_stype;
2005 efs.msdu = cpu_to_le16(chan->local_msdu);
2006 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2007 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2008 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2011 case L2CAP_MODE_STREAMING:
2013 efs.stype = L2CAP_SERV_BESTEFFORT;
2014 efs.msdu = cpu_to_le16(chan->local_msdu);
2015 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2024 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2025 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: send any pending acknowledgement
 * under the channel lock, then drop the reference the timer held on the
 * channel. */
2028 static void l2cap_ack_timeout(struct work_struct *work)
2030 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2033 BT_DBG("chan %p", chan);
2035 l2cap_chan_lock(chan);
2037 __l2cap_send_ack(chan);
2039 l2cap_chan_unlock(chan);
2041 l2cap_chan_put(chan);
/* Reset all ERTM per-channel state: zero the sequence/accounting counters,
 * set up the retransmission, monitor and ack delayed-work timers, and
 * initialise the SREJ receive queue and list. */
2044 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2046 chan->expected_ack_seq = 0;
2047 chan->unacked_frames = 0;
2048 chan->buffer_seq = 0;
2049 chan->num_acked = 0;
2050 chan->frames_sent = 0;
2052 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2053 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2054 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2056 skb_queue_head_init(&chan->srej_q);
2058 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to request: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * (The "return mode" line for the supported case is elided here.) */
2061 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2064 case L2CAP_MODE_STREAMING:
2065 case L2CAP_MODE_ERTM:
2066 if (l2cap_mode_supported(mode, remote_feat_mask))
2070 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled and
 * the remote advertising the extended-window feature bit. */
2074 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2076 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the extended-flow feature bit. */
2079 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2081 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Decide the transmit-window regime: if the requested window exceeds the
 * default and extended windows are supported, switch the channel to the
 * extended control field; otherwise clamp tx_win to the default window. */
2084 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2086 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2087 __l2cap_ews_supported(chan)) {
2088 /* use extended control field */
2089 set_bit(FLAG_EXT_CTRL, &chan->flags);
2090 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2092 chan->tx_win = min_t(u16, chan->tx_win,
2093 L2CAP_DEFAULT_TX_WINDOW);
2094 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into `data`.
 *
 * On the first request, ERTM/streaming channels may enable EFS and fall
 * back via l2cap_select_mode() when the remote lacks support.  Options
 * emitted depend on the final mode: MTU (if non-default), an RFC option
 * describing the mode, optionally EFS, an FCS option when disabling FCS
 * is possible, and EWS when the extended control field is in use.
 * Returns the request length (final `return ptr - data;` elided). */
2098 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2100 struct l2cap_conf_req *req = data;
2101 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2102 void *ptr = req->data;
2105 BT_DBG("chan %p", chan);
/* Only renegotiate the mode on the very first config exchange. */
2107 if (chan->num_conf_req || chan->num_conf_rsp)
2110 switch (chan->mode) {
2111 case L2CAP_MODE_STREAMING:
2112 case L2CAP_MODE_ERTM:
2113 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2116 if (__l2cap_efs_supported(chan))
2117 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2121 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2126 if (chan->imtu != L2CAP_DEFAULT_MTU)
2127 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2129 switch (chan->mode) {
2130 case L2CAP_MODE_BASIC:
/* Remote supports neither ERTM nor streaming: RFC option is pointless. */
2131 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2132 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2135 rfc.mode = L2CAP_MODE_BASIC;
2137 rfc.max_transmit = 0;
2138 rfc.retrans_timeout = 0;
2139 rfc.monitor_timeout = 0;
2140 rfc.max_pdu_size = 0;
2142 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2143 (unsigned long) &rfc);
2146 case L2CAP_MODE_ERTM:
2147 rfc.mode = L2CAP_MODE_ERTM;
2148 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the responder per spec; request sends zeros. */
2149 rfc.retrans_timeout = 0;
2150 rfc.monitor_timeout = 0;
2152 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2153 L2CAP_EXT_HDR_SIZE -
2156 rfc.max_pdu_size = cpu_to_le16(size);
2158 l2cap_txwin_setup(chan);
2160 rfc.txwin_size = min_t(u16, chan->tx_win,
2161 L2CAP_DEFAULT_TX_WINDOW);
2163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2164 (unsigned long) &rfc);
2166 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2167 l2cap_add_opt_efs(&ptr, chan);
2169 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable FCS when we don't need it or the peer asked. */
2172 if (chan->fcs == L2CAP_FCS_NONE ||
2173 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2174 chan->fcs = L2CAP_FCS_NONE;
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2178 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2179 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2183 case L2CAP_MODE_STREAMING:
2184 rfc.mode = L2CAP_MODE_STREAMING;
2186 rfc.max_transmit = 0;
2187 rfc.retrans_timeout = 0;
2188 rfc.monitor_timeout = 0;
2190 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2191 L2CAP_EXT_HDR_SIZE -
2194 rfc.max_pdu_size = cpu_to_le16(size);
2196 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2197 (unsigned long) &rfc);
2199 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2200 l2cap_add_opt_efs(&ptr, chan);
2202 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2205 if (chan->fcs == L2CAP_FCS_NONE ||
2206 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2207 chan->fcs = L2CAP_FCS_NONE;
2208 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2213 req->dcid = cpu_to_le16(chan->dcid);
2214 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req/conf_len) and
 * build the Configure Response into `data`.
 *
 * Pass 1 walks the option list recording MTU, flush timeout, RFC, FCS,
 * EFS and EWS values, collecting unknown non-hint option types for an
 * UNKNOWN result.  Pass 2 validates the requested mode against ours
 * (renegotiating on the first exchange), then for a successful result
 * fills in output options (MTU, RFC with our timeouts, optionally EFS)
 * and may switch to a PENDING response when EFS is in play.  Returns the
 * response length (final `return ptr - data;` elided); -ECONNREFUSED on
 * an unacceptable mode or EWS without extended-window support. */
2219 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2221 struct l2cap_conf_rsp *rsp = data;
2222 void *ptr = rsp->data;
2223 void *req = chan->conf_req;
2224 int len = chan->conf_len;
2225 int type, hint, olen;
2227 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2228 struct l2cap_conf_efs efs;
2230 u16 mtu = L2CAP_DEFAULT_MTU;
2231 u16 result = L2CAP_CONF_SUCCESS;
2234 BT_DBG("chan %p", chan);
2236 while (len >= L2CAP_CONF_OPT_SIZE) {
2237 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; others must be understood. */
2239 hint = type & L2CAP_CONF_HINT;
2240 type &= L2CAP_CONF_MASK;
2243 case L2CAP_CONF_MTU:
2247 case L2CAP_CONF_FLUSH_TO:
2248 chan->flush_to = val;
2251 case L2CAP_CONF_QOS:
2254 case L2CAP_CONF_RFC:
2255 if (olen == sizeof(rfc))
2256 memcpy(&rfc, (void *) val, olen);
2259 case L2CAP_CONF_FCS:
2260 if (val == L2CAP_FCS_NONE)
2261 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2264 case L2CAP_CONF_EFS:
2266 if (olen == sizeof(efs))
2267 memcpy(&efs, (void *) val, olen);
2270 case L2CAP_CONF_EWS:
2272 return -ECONNREFUSED;
2274 set_bit(FLAG_EXT_CTRL, &chan->flags);
2275 set_bit(CONF_EWS_RECV, &chan->conf_state);
2276 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2277 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2284 result = L2CAP_CONF_UNKNOWN;
2285 *((u8 *) ptr++) = type;
2290 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2293 switch (chan->mode) {
2294 case L2CAP_MODE_STREAMING:
2295 case L2CAP_MODE_ERTM:
2296 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2297 chan->mode = l2cap_select_mode(rfc.mode,
2298 chan->conn->feat_mask);
2303 if (__l2cap_efs_supported(chan))
2304 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2306 return -ECONNREFUSED;
2309 if (chan->mode != rfc.mode)
2310 return -ECONNREFUSED;
2316 if (chan->mode != rfc.mode) {
2317 result = L2CAP_CONF_UNACCEPT;
2318 rfc.mode = chan->mode;
/* Only one renegotiation round is allowed. */
2320 if (chan->num_conf_rsp == 1)
2321 return -ECONNREFUSED;
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2324 sizeof(rfc), (unsigned long) &rfc);
2327 if (result == L2CAP_CONF_SUCCESS) {
2328 /* Configure output options and let the other side know
2329 * which ones we don't like. */
2331 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2332 result = L2CAP_CONF_UNACCEPT;
2335 set_bit(CONF_MTU_DONE, &chan->conf_state);
2337 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service types must match unless either side is NO-TRAFFIC. */
2340 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2341 efs.stype != L2CAP_SERV_NOTRAFIC &&
2342 efs.stype != chan->local_stype) {
2344 result = L2CAP_CONF_UNACCEPT;
2346 if (chan->num_conf_req >= 1)
2347 return -ECONNREFUSED;
2349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2351 (unsigned long) &efs);
2353 /* Send PENDING Conf Rsp */
2354 result = L2CAP_CONF_PENDING;
2355 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2360 case L2CAP_MODE_BASIC:
2361 chan->fcs = L2CAP_FCS_NONE;
2362 set_bit(CONF_MODE_DONE, &chan->conf_state);
2365 case L2CAP_MODE_ERTM:
2366 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2367 chan->remote_tx_win = rfc.txwin_size;
2369 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2371 chan->remote_max_tx = rfc.max_transmit;
2373 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2375 L2CAP_EXT_HDR_SIZE -
2378 rfc.max_pdu_size = cpu_to_le16(size);
2379 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu() on a host-order constant looks inverted —
 * cpu_to_le16() is presumably intended for these __le16 fields; harmless
 * on little-endian, wrong on big-endian.  TODO confirm against upstream. */
2381 rfc.retrans_timeout =
2382 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2383 rfc.monitor_timeout =
2384 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2386 set_bit(CONF_MODE_DONE, &chan->conf_state);
2388 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2389 sizeof(rfc), (unsigned long) &rfc);
2391 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2392 chan->remote_id = efs.id;
2393 chan->remote_stype = efs.stype;
2394 chan->remote_msdu = le16_to_cpu(efs.msdu);
2395 chan->remote_flush_to =
2396 le32_to_cpu(efs.flush_to);
2397 chan->remote_acc_lat =
2398 le32_to_cpu(efs.acc_lat);
2399 chan->remote_sdu_itime =
2400 le32_to_cpu(efs.sdu_itime);
2401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2402 sizeof(efs), (unsigned long) &efs);
2406 case L2CAP_MODE_STREAMING:
2407 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2409 L2CAP_EXT_HDR_SIZE -
2412 rfc.max_pdu_size = cpu_to_le16(size);
2413 chan->remote_mps = size;
2415 set_bit(CONF_MODE_DONE, &chan->conf_state);
2417 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2418 sizeof(rfc), (unsigned long) &rfc);
2423 result = L2CAP_CONF_UNACCEPT;
2425 memset(&rfc, 0, sizeof(rfc));
2426 rfc.mode = chan->mode;
2429 if (result == L2CAP_CONF_SUCCESS)
2430 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2432 rsp->scid = cpu_to_le16(chan->dcid);
2433 rsp->result = cpu_to_le16(result);
2434 rsp->flags = cpu_to_le16(0x0000);
/* Parse a Configure Response (`rsp`/`len`) and build the follow-up
 * Configure Request into `data`, adjusting channel state from the options
 * the peer returned (MTU, flush timeout, RFC, EWS, EFS).  On success or
 * pending results, commit the negotiated ERTM/streaming parameters to the
 * channel.  Returns the new request length (final `return ptr - data;`
 * elided); -ECONNREFUSED when the peer's mode or EFS service type is
 * unacceptable. */
2439 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2441 struct l2cap_conf_req *req = data;
2442 void *ptr = req->data;
2445 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2446 struct l2cap_conf_efs efs;
2448 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2450 while (len >= L2CAP_CONF_OPT_SIZE) {
2451 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2454 case L2CAP_CONF_MTU:
/* Peer's MTU too small: reject but propose the minimum. */
2455 if (val < L2CAP_DEFAULT_MIN_MTU) {
2456 *result = L2CAP_CONF_UNACCEPT;
2457 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2463 case L2CAP_CONF_FLUSH_TO:
2464 chan->flush_to = val;
2465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2469 case L2CAP_CONF_RFC:
2470 if (olen == sizeof(rfc))
2471 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the peer change the mode. */
2473 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2474 rfc.mode != chan->mode)
2475 return -ECONNREFUSED;
2479 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2480 sizeof(rfc), (unsigned long) &rfc);
2483 case L2CAP_CONF_EWS:
2484 chan->tx_win = min_t(u16, val,
2485 L2CAP_DEFAULT_EXT_WINDOW);
2486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2490 case L2CAP_CONF_EFS:
2491 if (olen == sizeof(efs))
2492 memcpy(&efs, (void *)val, olen);
2494 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2495 efs.stype != L2CAP_SERV_NOTRAFIC &&
2496 efs.stype != chan->local_stype)
2497 return -ECONNREFUSED;
2499 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2500 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated away by the responder. */
2505 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2506 return -ECONNREFUSED;
2508 chan->mode = rfc.mode;
2510 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2512 case L2CAP_MODE_ERTM:
2513 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2514 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2515 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2517 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2518 chan->local_msdu = le16_to_cpu(efs.msdu);
2519 chan->local_sdu_itime =
2520 le32_to_cpu(efs.sdu_itime);
2521 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2522 chan->local_flush_to =
2523 le32_to_cpu(efs.flush_to);
2527 case L2CAP_MODE_STREAMING:
2528 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2532 req->dcid = cpu_to_le16(chan->dcid);
2533 req->flags = cpu_to_le16(0x0000);
/* Fill in a bare Configure Response header (scid/result/flags) with no
 * options; returns the response length (the return line is elided). */
2538 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2540 struct l2cap_conf_rsp *rsp = data;
2541 void *ptr = rsp->data;
2543 BT_DBG("chan %p", chan);
2545 rsp->scid = cpu_to_le16(chan->dcid);
2546 rsp->result = cpu_to_le16(result);
2547 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connection accept: send the success Connect
 * Response using the ident saved from the request, then — unless a
 * Configure Request was already sent — start configuration and bump the
 * request counter. */
2552 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2554 struct l2cap_conn_rsp rsp;
2555 struct l2cap_conn *conn = chan->conn;
2558 rsp.scid = cpu_to_le16(chan->dcid);
2559 rsp.dcid = cpu_to_le16(chan->scid);
2560 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2561 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2562 l2cap_send_cmd(conn, chan->ident,
2563 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2565 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2568 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2569 l2cap_build_conf_req(chan, buf), buf);
2570 chan->num_conf_req++;
/* Extract the RFC option from a Configure Response and commit its
 * timeouts/MPS to the channel.  Only meaningful for ERTM/streaming; if
 * the peer omitted the RFC option, substitute sane defaults (the goto
 * that routes around the default block is elided in this extract). */
2573 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2577 struct l2cap_conf_rfc rfc;
2579 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2581 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2584 while (len >= L2CAP_CONF_OPT_SIZE) {
2585 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2588 case L2CAP_CONF_RFC:
2589 if (olen == sizeof(rfc))
2590 memcpy(&rfc, (void *)val, olen);
2595 /* Use sane default values in case a misbehaving remote device
2596 * did not send an RFC option.
2598 rfc.mode = chan->mode;
2599 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2600 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2601 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2603 BT_ERR("Expected RFC option was not found, using defaults");
2607 case L2CAP_MODE_ERTM:
2608 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2609 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2610 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2612 case L2CAP_MODE_STREAMING:
2613 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done: cancel the info timer, mark it complete, and let pending
 * connections proceed. */
2617 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2619 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2621 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2624 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2625 cmd->ident == conn->info_ident) {
2626 cancel_delayed_work(&conn->info_timer);
2628 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2629 conn->info_ident = 0;
2631 l2cap_conn_start(conn);
/* Handle an incoming Connection Request.
 *
 * Look up a listening channel for the PSM; reject with BAD_PSM if none.
 * Enforce link security for non-SDP PSMs (SEC_BLOCK), check the accept
 * backlog and duplicate dcid, then create the child channel, add it to
 * the connection and decide the response: SUCCESS when security already
 * passed, PEND with AUTHOR_PEND/AUTHEN_PEND otherwise.  Afterwards send
 * the Connect Response, kick off the feature-mask Information exchange if
 * it hasn't run yet, and start configuration on immediate success.
 * (Partial listing: lock/unlock of the parent socket, several goto
 * targets and the final return are elided.) */
2637 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2639 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2640 struct l2cap_conn_rsp rsp;
2641 struct l2cap_chan *chan = NULL, *pchan;
2642 struct sock *parent, *sk = NULL;
2643 int result, status = L2CAP_CS_NO_INFO;
2645 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2646 __le16 psm = req->psm;
2648 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2650 /* Check if we have socket listening on psm */
2651 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2653 result = L2CAP_CR_BAD_PSM;
2659 mutex_lock(&conn->chan_lock);
2662 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, exempt from the link-mode check. */
2663 if (psm != cpu_to_le16(0x0001) &&
2664 !hci_conn_check_link_mode(conn->hcon)) {
2665 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2666 result = L2CAP_CR_SEC_BLOCK;
2670 result = L2CAP_CR_NO_MEM;
2672 /* Check for backlog size */
2673 if (sk_acceptq_is_full(parent)) {
2674 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2678 chan = pchan->ops->new_connection(pchan->data);
2684 /* Check if we already have channel with that dcid */
2685 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2686 sock_set_flag(sk, SOCK_ZAPPED);
2687 chan->ops->close(chan->data);
2691 hci_conn_hold(conn->hcon);
2693 bacpy(&bt_sk(sk)->src, conn->src);
2694 bacpy(&bt_sk(sk)->dst, conn->dst);
2698 bt_accept_enqueue(parent, sk);
2700 __l2cap_chan_add(conn, chan);
2704 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Save the request ident for a possibly deferred Connect Response. */
2706 chan->ident = cmd->ident;
2708 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2709 if (l2cap_chan_check_security(chan)) {
2710 if (bt_sk(sk)->defer_setup) {
2711 __l2cap_state_change(chan, BT_CONNECT2);
2712 result = L2CAP_CR_PEND;
2713 status = L2CAP_CS_AUTHOR_PEND;
2714 parent->sk_data_ready(parent, 0);
2716 __l2cap_state_change(chan, BT_CONFIG);
2717 result = L2CAP_CR_SUCCESS;
2718 status = L2CAP_CS_NO_INFO;
2721 __l2cap_state_change(chan, BT_CONNECT2);
2722 result = L2CAP_CR_PEND;
2723 status = L2CAP_CS_AUTHEN_PEND;
2726 __l2cap_state_change(chan, BT_CONNECT2);
2727 result = L2CAP_CR_PEND;
2728 status = L2CAP_CS_NO_INFO;
2732 release_sock(parent);
2733 mutex_unlock(&conn->chan_lock);
2736 rsp.scid = cpu_to_le16(scid);
2737 rsp.dcid = cpu_to_le16(dcid);
2738 rsp.result = cpu_to_le16(result);
2739 rsp.status = cpu_to_le16(status);
2740 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2742 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2743 struct l2cap_info_req info;
2744 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2746 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2747 conn->info_ident = l2cap_get_ident(conn);
2749 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2751 l2cap_send_cmd(conn, conn->info_ident,
2752 L2CAP_INFO_REQ, sizeof(info), &info);
2755 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2756 result == L2CAP_CR_SUCCESS) {
2758 set_bit(CONF_REQ_SENT, &chan->conf_state);
2759 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2760 l2cap_build_conf_req(chan, buf), buf);
2761 chan->num_conf_req++;
/* Handle an incoming Connection Response.  Locate our channel by scid
 * (or by ident when scid is not yet assigned), then: SUCCESS moves the
 * channel to BT_CONFIG, records the peer's dcid and starts configuration;
 * PEND marks the connect pending (the PEND case label is elided); any
 * other result tears the channel down with ECONNREFUSED. */
2767 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2769 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2770 u16 scid, dcid, result, status;
2771 struct l2cap_chan *chan;
2775 scid = __le16_to_cpu(rsp->scid);
2776 dcid = __le16_to_cpu(rsp->dcid);
2777 result = __le16_to_cpu(rsp->result);
2778 status = __le16_to_cpu(rsp->status);
2780 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2781 dcid, scid, result, status);
2783 mutex_lock(&conn->chan_lock);
2786 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid in the response: fall back to matching the command ident. */
2792 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2801 l2cap_chan_lock(chan);
2804 case L2CAP_CR_SUCCESS:
2805 l2cap_state_change(chan, BT_CONFIG);
2808 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2810 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2813 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2814 l2cap_build_conf_req(chan, req), req);
2815 chan->num_conf_req++;
2819 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2823 l2cap_chan_del(chan, ECONNREFUSED);
2827 l2cap_chan_unlock(chan);
2830 mutex_unlock(&conn->chan_lock);
2835 static inline void set_default_fcs(struct l2cap_chan *chan)
2837 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* Other modes never use an FCS; within ERTM/streaming, default to CRC16
 * unless the peer explicitly asked to disable it during configuration. */
2840 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2841 chan->fcs = L2CAP_FCS_NONE;
2842 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2843 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.
 *
 * Reject with INVALID_CID if the channel is not in a configurable state,
 * and with CONF_REJECT if the accumulated option data would overflow
 * chan->conf_req.  Options may arrive split across several requests (the
 * continuation flag, bit 0 of `flags`): partial requests get an empty
 * SUCCESS response and we keep buffering.  Once complete, parse the
 * request, respond, and — when both directions are configured — finish
 * channel setup (FCS defaults, ERTM init, BT_CONNECTED).  Also handles
 * the PENDING-response rendezvous for EFS negotiation.
 * (Partial listing: several declarations, goto targets and returns are
 * elided.) */
2846 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2848 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2851 struct l2cap_chan *chan;
2854 dcid = __le16_to_cpu(req->dcid);
2855 flags = __le16_to_cpu(req->flags);
2857 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2859 chan = l2cap_get_chan_by_scid(conn, dcid);
2863 l2cap_chan_lock(chan);
2865 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2866 struct l2cap_cmd_rej_cid rej;
2868 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2869 rej.scid = cpu_to_le16(chan->scid);
2870 rej.dcid = cpu_to_le16(chan->dcid);
2872 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2877 /* Reject if config buffer is too small. */
2878 len = cmd_len - sizeof(*req);
2879 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2880 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2881 l2cap_build_conf_rsp(chan, rsp,
2882 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this request's options into the per-channel buffer. */
2887 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2888 chan->conf_len += len;
2890 if (flags & 0x0001) {
2891 /* Incomplete config. Send empty response. */
2892 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2893 l2cap_build_conf_rsp(chan, rsp,
2894 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2898 /* Complete config. */
2899 len = l2cap_parse_conf_req(chan, rsp);
2901 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2905 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2906 chan->num_conf_rsp++;
2908 /* Reset config buffer. */
2911 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is ready for data. */
2914 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2915 set_default_fcs(chan);
2917 l2cap_state_change(chan, BT_CONNECTED);
2919 chan->next_tx_seq = 0;
2920 chan->expected_tx_seq = 0;
2921 skb_queue_head_init(&chan->tx_q);
2922 if (chan->mode == L2CAP_MODE_ERTM)
2923 l2cap_ertm_init(chan);
2925 l2cap_chan_ready(chan);
2929 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2931 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2932 l2cap_build_conf_req(chan, buf), buf);
2933 chan->num_conf_req++;
2936 /* Got Conf Rsp PENDING from remote side and asume we sent
2937 Conf Rsp PENDING in the code above */
2938 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2939 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2941 /* check compatibility */
2943 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2944 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2946 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2947 l2cap_build_conf_rsp(chan, rsp,
2948 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2952 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response for the channel matching scid.
 *
 * SUCCESS: commit RFC parameters.  PENDING: note the remote's pending
 * state and, if we are pending too, parse the response, finish our side
 * and answer SUCCESS.  UNACCEPT: re-parse and retry with a new Configure
 * Request, up to L2CAP_CONF_MAX_CONF_RSP attempts; oversized rejections
 * or parse failures disconnect.  Any other result (the default label is
 * elided) sets ECONNRESET and disconnects.  When our input side was the
 * last to finish, complete channel setup as in l2cap_config_req().
 * (Partial listing: goto targets, buffers and returns are elided.) */
2956 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2958 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2959 u16 scid, flags, result;
2960 struct l2cap_chan *chan;
2961 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2963 scid = __le16_to_cpu(rsp->scid);
2964 flags = __le16_to_cpu(rsp->flags);
2965 result = __le16_to_cpu(rsp->result);
2967 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
2970 chan = l2cap_get_chan_by_scid(conn, scid);
2974 l2cap_chan_lock(chan);
2977 case L2CAP_CONF_SUCCESS:
2978 l2cap_conf_rfc_get(chan, rsp->data, len);
2979 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2982 case L2CAP_CONF_PENDING:
2983 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2985 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2988 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2991 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2995 /* check compatibility */
2997 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2998 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3000 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3001 l2cap_build_conf_rsp(chan, buf,
3002 L2CAP_CONF_SUCCESS, 0x0000), buf);
3006 case L2CAP_CONF_UNACCEPT:
3007 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against option data larger than our request buffer. */
3010 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3011 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3015 /* throw out any old stored conf requests */
3016 result = L2CAP_CONF_SUCCESS;
3017 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3020 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3024 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3025 L2CAP_CONF_REQ, len, req);
3026 chan->num_conf_req++;
3027 if (result != L2CAP_CONF_SUCCESS)
3033 l2cap_chan_set_err(chan, ECONNRESET);
3035 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3036 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3043 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3045 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3046 set_default_fcs(chan);
3048 l2cap_state_change(chan, BT_CONNECTED);
3049 chan->next_tx_seq = 0;
3050 chan->expected_tx_seq = 0;
3051 skb_queue_head_init(&chan->tx_q);
3052 if (chan->mode == L2CAP_MODE_ERTM)
3053 l2cap_ertm_init(chan);
3055 l2cap_chan_ready(chan);
3059 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: look up the target channel by our
 * dcid under conn->chan_lock, echo a Disconnect Response carrying both CIDs,
 * mark the backing socket shut down, delete the channel with ECONNRESET and
 * invoke its close callback.
 *
 * NOTE(review): chan->ops->close(chan->data) runs after l2cap_chan_del() and
 * l2cap_chan_unlock(); no reference hold is visible in this excerpt, so
 * confirm the channel cannot be freed in between (use-after-free risk).
 */
3063 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3065 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3066 struct l2cap_disconn_rsp rsp;
3068 struct l2cap_chan *chan;
3071 scid = __le16_to_cpu(req->scid);
3072 dcid = __le16_to_cpu(req->dcid);
3074 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3076 mutex_lock(&conn->chan_lock);
/* the peer's "dcid" is our source CID, hence the scid-keyed lookup */
3078 chan = __l2cap_get_chan_by_scid(conn, dcid);
3080 mutex_unlock(&conn->chan_lock);
3084 l2cap_chan_lock(chan);
/* respond with the CID pair swapped into the peer's point of view */
3088 rsp.dcid = cpu_to_le16(chan->scid);
3089 rsp.scid = cpu_to_le16(chan->dcid);
3090 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
/* sk presumably chan->sk, assigned on a line missing from this excerpt */
3093 sk->sk_shutdown = SHUTDOWN_MASK;
3096 l2cap_chan_del(chan, ECONNRESET);
3098 l2cap_chan_unlock(chan);
3100 chan->ops->close(chan->data);
3102 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnect Response: the peer has acknowledged our
 * Disconnect Request, so remove the channel (error 0 — clean shutdown) and
 * invoke its close callback.
 *
 * NOTE(review): as in l2cap_disconnect_req(), chan->ops->close(chan->data)
 * runs after l2cap_chan_del()/l2cap_chan_unlock() with no visible reference
 * hold — verify the channel lifetime.
 */
3107 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3109 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3111 struct l2cap_chan *chan;
3113 scid = __le16_to_cpu(rsp->scid);
3114 dcid = __le16_to_cpu(rsp->dcid);
3116 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3118 mutex_lock(&conn->chan_lock);
3120 chan = __l2cap_get_chan_by_scid(conn, scid);
/* lookup failed: nothing to tear down */
3122 mutex_unlock(&conn->chan_lock);
3126 l2cap_chan_lock(chan);
3128 l2cap_chan_del(chan, 0);
3130 l2cap_chan_unlock(chan);
3132 chan->ops->close(chan->data);
3134 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.  Two info types are served:
 *  - L2CAP_IT_FEAT_MASK: reply with our feature mask (ERTM/streaming and,
 *    conditionally, extended flow/window bits ORed in);
 *  - L2CAP_IT_FIXED_CHAN: reply with the fixed-channel bitmap, with the
 *    A2MP bit toggled in the global l2cap_fixed_chan[] table first.
 * Any other type gets an L2CAP_IR_NOTSUPP response.
 *
 * NOTE(review): l2cap_fixed_chan[] is file-global mutable state updated
 * here on every request; the condition guarding the A2MP set/clear is on a
 * line missing from this excerpt.
 */
3139 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3141 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3144 type = __le16_to_cpu(req->type);
3146 BT_DBG("type 0x%4.4x", type);
3148 if (type == L2CAP_IT_FEAT_MASK) {
3150 u32 feat_mask = l2cap_feat_mask;
3151 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3152 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3153 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3155 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3158 feat_mask |= L2CAP_FEAT_EXT_FLOW
3159 | L2CAP_FEAT_EXT_WINDOW;
/* mask is serialized little-endian; rsp->data may be unaligned */
3161 put_unaligned_le32(feat_mask, rsp->data);
3162 l2cap_send_cmd(conn, cmd->ident,
3163 L2CAP_INFO_RSP, sizeof(buf), buf);
3164 } else if (type == L2CAP_IT_FIXED_CHAN) {
3166 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3169 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3171 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3173 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3174 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3175 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3176 l2cap_send_cmd(conn, cmd->ident,
3177 L2CAP_INFO_RSP, sizeof(buf), buf);
/* unknown info type: report "not supported" */
3179 struct l2cap_info_rsp rsp;
3180 rsp.type = cpu_to_le16(type);
3181 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3182 l2cap_send_cmd(conn, cmd->ident,
3183 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response.  Responses are only accepted if
 * the ident matches our outstanding request and the feature exchange is not
 * already finished (info req/rsp are not bound to a channel, so this is the
 * anti-spoofing check).  On success:
 *  - FEAT_MASK: store the peer's feature mask; if it advertises fixed
 *    channels, chain a FIXED_CHAN information request, otherwise mark the
 *    exchange done and kick off any pending connection requests;
 *  - FIXED_CHAN: store the fixed-channel bitmap, mark done, start pending
 *    connections.
 * A non-success result simply ends the exchange and starts connections.
 */
3189 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3191 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3194 type = __le16_to_cpu(rsp->type);
3195 result = __le16_to_cpu(rsp->result);
3197 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3199 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3200 if (cmd->ident != conn->info_ident ||
3201 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* a valid response arrived in time: stop the info timeout */
3204 cancel_delayed_work(&conn->info_timer);
3206 if (result != L2CAP_IR_SUCCESS) {
3207 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3208 conn->info_ident = 0;
3210 l2cap_conn_start(conn);
3216 case L2CAP_IT_FEAT_MASK:
3217 conn->feat_mask = get_unaligned_le32(rsp->data);
/* peer supports fixed channels: ask which ones before proceeding */
3219 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3220 struct l2cap_info_req req;
3221 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3223 conn->info_ident = l2cap_get_ident(conn);
3225 l2cap_send_cmd(conn, conn->info_ident,
3226 L2CAP_INFO_REQ, sizeof(req), &req);
3228 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3229 conn->info_ident = 0;
3231 l2cap_conn_start(conn);
3235 case L2CAP_IT_FIXED_CHAN:
3236 conn->fixed_chan_mask = rsp->data[0];
3237 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3238 conn->info_ident = 0;
3240 l2cap_conn_start(conn);
/* Handle an incoming AMP Create Channel Request.  The payload length is
 * validated against the fixed request size; the implementation is a
 * placeholder that always responds with L2CAP_CR_NO_MEM (rejection) —
 * actual AMP channel creation is not supported here.
 */
3247 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3248 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3251 struct l2cap_create_chan_req *req = data;
3252 struct l2cap_create_chan_rsp rsp;
/* malformed command: wrong payload size (reject path not visible) */
3255 if (cmd_len != sizeof(*req))
3261 psm = le16_to_cpu(req->psm);
3262 scid = le16_to_cpu(req->scid);
3264 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3266 /* Placeholder: Always reject */
3268 rsp.scid = cpu_to_le16(scid);
3269 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3270 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3272 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an incoming AMP Create Channel Response.  The response layout
 * matches a Connect Response, so processing is delegated wholesale to
 * l2cap_connect_rsp().
 */
3278 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3279 struct l2cap_cmd_hdr *cmd, void *data)
3281 BT_DBG("conn %p", conn);
3283 return l2cap_connect_rsp(conn, cmd, data);
/* Build and send a Move Channel Response for @icid with the given @result,
 * reusing the @ident of the request being answered.
 */
3286 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3287 u16 icid, u16 result)
3289 struct l2cap_move_chan_rsp rsp;
3291 BT_DBG("icid %d, result %d", icid, result);
3293 rsp.icid = cpu_to_le16(icid);
3294 rsp.result = cpu_to_le16(result);
3296 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3299 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3300 struct l2cap_chan *chan, u16 icid, u16 result)
3302 struct l2cap_move_chan_cfm cfm;
3305 BT_DBG("icid %d, result %d", icid, result);
3307 ident = l2cap_get_ident(conn);
3309 chan->ident = ident;
3311 cfm.icid = cpu_to_le16(icid);
3312 cfm.result = cpu_to_le16(result);
3314 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Build and send a Move Channel Confirmation Response for @icid, reusing
 * the @ident of the confirmation being acknowledged.
 */
3317 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3320 struct l2cap_move_chan_cfm_rsp rsp;
3322 BT_DBG("icid %d", icid);
3324 rsp.icid = cpu_to_le16(icid);
3325 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Move Channel Request.  After a payload-length check,
 * the placeholder implementation always answers with
 * L2CAP_MR_NOT_ALLOWED — channel moves to/from an AMP are not supported.
 */
3328 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3329 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3331 struct l2cap_move_chan_req *req = data;
3333 u16 result = L2CAP_MR_NOT_ALLOWED;
/* malformed command: wrong payload size (reject path not visible) */
3335 if (cmd_len != sizeof(*req))
3338 icid = le16_to_cpu(req->icid);
3340 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3345 /* Placeholder: Always refuse */
3346 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Response.  Placeholder: always sends a
 * Move Channel Confirmation with L2CAP_MC_UNCONFIRMED.
 *
 * NOTE(review): a NULL channel pointer is passed to
 * l2cap_send_move_chan_cfm(), which (as visible in this excerpt) writes
 * chan->ident — verify that helper tolerates NULL, else this path is a
 * NULL dereference.
 */
3351 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3352 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3354 struct l2cap_move_chan_rsp *rsp = data;
/* malformed command: wrong payload size (reject path not visible) */
3357 if (cmd_len != sizeof(*rsp))
3360 icid = le16_to_cpu(rsp->icid);
3361 result = le16_to_cpu(rsp->result);
3363 BT_DBG("icid %d, result %d", icid, result);
3365 /* Placeholder: Always unconfirmed */
3366 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle an incoming Move Channel Confirmation: validate the payload
 * length and acknowledge with a Move Channel Confirmation Response.  The
 * result value is only logged — no local move state exists yet.
 */
3371 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3372 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3374 struct l2cap_move_chan_cfm *cfm = data;
3377 if (cmd_len != sizeof(*cfm))
3380 icid = le16_to_cpu(cfm->icid);
3381 result = le16_to_cpu(cfm->result);
3383 BT_DBG("icid %d, result %d", icid, result);
3385 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle an incoming Move Channel Confirmation Response: only a length
 * check and a debug print — the move state machine is not implemented.
 */
3390 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3391 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3393 struct l2cap_move_chan_cfm_rsp *rsp = data;
3396 if (cmd_len != sizeof(*rsp))
3399 icid = le16_to_cpu(rsp->icid);
3401 BT_DBG("icid %d", icid);
3406 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3411 if (min > max || min < 6 || max > 3200)
3414 if (to_multiplier < 10 || to_multiplier > 3200)
3417 if (max >= to_multiplier * 8)
3420 max_latency = (to_multiplier * 8 / max) - 1;
3421 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only the master side
 * of the link may process it (a slave rejects the command — rejection path
 * not visible here).  The payload length and each parameter are validated;
 * an accepted/rejected response is sent back, and on acceptance the LE
 * controller is asked to apply the new parameters.
 */
3427 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3428 struct l2cap_cmd_hdr *cmd, u8 *data)
3430 struct hci_conn *hcon = conn->hcon;
3431 struct l2cap_conn_param_update_req *req;
3432 struct l2cap_conn_param_update_rsp rsp;
3433 u16 min, max, latency, to_multiplier, cmd_len;
/* slaves must not act on this request */
3436 if (!(hcon->link_mode & HCI_LM_MASTER))
3439 cmd_len = __le16_to_cpu(cmd->len);
3440 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3443 req = (struct l2cap_conn_param_update_req *) data;
3444 min = __le16_to_cpu(req->min);
3445 max = __le16_to_cpu(req->max);
3446 latency = __le16_to_cpu(req->latency);
3447 to_multiplier = __le16_to_cpu(req->to_multiplier);
3449 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3450 min, max, latency, to_multiplier);
3452 memset(&rsp, 0, sizeof(rsp));
3454 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3456 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3458 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3460 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* parameters were valid: push the update down to the controller
 * (the guarding "if (!err)" line is missing from this excerpt) */
3464 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code.  Echo requests are answered inline by reflecting the
 * payload; unknown codes are logged and reported as errors (the caller
 * then sends a Command Reject).
 */
3469 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3470 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3474 switch (cmd->code) {
3475 case L2CAP_COMMAND_REJ:
3476 l2cap_command_rej(conn, cmd, data);
3479 case L2CAP_CONN_REQ:
3480 err = l2cap_connect_req(conn, cmd, data);
3483 case L2CAP_CONN_RSP:
3484 err = l2cap_connect_rsp(conn, cmd, data);
3487 case L2CAP_CONF_REQ:
3488 err = l2cap_config_req(conn, cmd, cmd_len, data);
3491 case L2CAP_CONF_RSP:
3492 err = l2cap_config_rsp(conn, cmd, data);
3495 case L2CAP_DISCONN_REQ:
3496 err = l2cap_disconnect_req(conn, cmd, data);
3499 case L2CAP_DISCONN_RSP:
3500 err = l2cap_disconnect_rsp(conn, cmd, data);
3503 case L2CAP_ECHO_REQ:
/* echo: bounce the request payload straight back */
3504 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3507 case L2CAP_ECHO_RSP:
3510 case L2CAP_INFO_REQ:
3511 err = l2cap_information_req(conn, cmd, data);
3514 case L2CAP_INFO_RSP:
3515 err = l2cap_information_rsp(conn, cmd, data);
3518 case L2CAP_CREATE_CHAN_REQ:
3519 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3522 case L2CAP_CREATE_CHAN_RSP:
3523 err = l2cap_create_channel_rsp(conn, cmd, data);
3526 case L2CAP_MOVE_CHAN_REQ:
3527 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3530 case L2CAP_MOVE_CHAN_RSP:
3531 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3534 case L2CAP_MOVE_CHAN_CFM:
3535 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3538 case L2CAP_MOVE_CHAN_CFM_RSP:
3539 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3543 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only Connection Parameter Update
 * requests are processed; command-reject and update-response are ignored,
 * anything else is logged as unknown (caller will send a Command Reject).
 */
3551 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3552 struct l2cap_cmd_hdr *cmd, u8 *data)
3554 switch (cmd->code) {
3555 case L2CAP_COMMAND_REJ:
3558 case L2CAP_CONN_PARAM_UPDATE_REQ:
3559 return l2cap_conn_param_update_req(conn, cmd, data);
3561 case L2CAP_CONN_PARAM_UPDATE_RSP:
3565 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on a signaling channel.  After handing a copy to
 * raw listeners, walk the packet command-by-command: peel off each command
 * header, sanity-check the advertised length against what remains and the
 * ident against zero, then dispatch to the LE or BR/EDR command handler
 * depending on the link type.  A handler error produces an L2CAP Command
 * Reject back to the peer.
 */
3570 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3571 struct sk_buff *skb)
3573 u8 *data = skb->data;
3575 struct l2cap_cmd_hdr cmd;
3578 l2cap_raw_recv(conn, skb);
/* one signaling packet may carry several commands back to back */
3580 while (len >= L2CAP_CMD_HDR_SIZE) {
3582 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3583 data += L2CAP_CMD_HDR_SIZE;
3584 len -= L2CAP_CMD_HDR_SIZE;
3586 cmd_len = le16_to_cpu(cmd.len);
3588 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* claimed length larger than remaining bytes, or invalid ident 0 */
3590 if (cmd_len > len || !cmd.ident) {
3591 BT_DBG("corrupted command");
3595 if (conn->hcon->type == LE_LINK)
3596 err = l2cap_le_sig_cmd(conn, &cmd, data);
3598 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3601 struct l2cap_cmd_rej_unk rej;
3603 BT_ERR("Wrong link type (%d)", err);
3605 /* FIXME: Map err to a valid reason */
3606 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3607 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the Frame Check Sequence of a received ERTM/streaming frame.
 * When the channel uses CRC16, the trailing FCS bytes are trimmed off the
 * skb, re-read from just past the new end, and compared against a CRC16
 * computed over the L2CAP header (which sits hdr_size bytes before
 * skb->data after the earlier pull) plus the remaining payload.  Returns
 * nonzero on mismatch (exact return lines not visible in this excerpt).
 */
3617 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3619 u16 our_fcs, rcv_fcs;
/* extended vs. enhanced control field changes the header size */
3622 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3623 hdr_size = L2CAP_EXT_HDR_SIZE;
3625 hdr_size = L2CAP_ENH_HDR_SIZE;
3627 if (chan->fcs == L2CAP_FCS_CRC16) {
3628 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* the trimmed-off FCS bytes still sit right after the new tail */
3629 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3630 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3632 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send whatever tells it our state —
 * an RNR when we are locally busy, otherwise pending I-frames (after a
 * retransmit if the peer had been busy), and finally an RR if nothing at
 * all was sent so the peer still gets its final/ack.
 */
3638 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3642 chan->frames_sent = 0;
3644 control |= __set_reqseq(chan, chan->buffer_seq);
3646 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3648 l2cap_send_sframe(chan, control);
3649 set_bit(CONN_RNR_SENT, &chan->conn_state);
3652 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3653 l2cap_retransmit_frames(chan);
3655 l2cap_ertm_send(chan);
/* nothing went out above: still acknowledge with an RR */
3657 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3658 chan->frames_sent == 0) {
3659 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3660 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq offset from buffer_seq.  Duplicates (same tx_seq as a
 * queued frame) are detected and not inserted twice; a frame beyond all
 * queued entries is appended at the tail.  Return value lines for the
 * duplicate/success cases are not visible in this excerpt.
 */
3664 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3666 struct sk_buff *next_skb;
3667 int tx_seq_offset, next_tx_seq_offset;
/* stash sequencing metadata in the skb control block */
3669 bt_cb(skb)->tx_seq = tx_seq;
3670 bt_cb(skb)->sar = sar;
3672 next_skb = skb_peek(&chan->srej_q);
3674 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* duplicate of an already-queued frame */
3677 if (bt_cb(next_skb)->tx_seq == tx_seq)
3680 next_tx_seq_offset = __seq_offset(chan,
3681 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* found the first queued frame that sorts after us: insert before it */
3683 if (next_tx_seq_offset > tx_seq_offset) {
3684 __skb_queue_before(&chan->srej_q, next_skb, skb);
3688 if (skb_queue_is_last(&chan->srej_q, next_skb))
3691 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3694 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the current tail in
 * *@last_frag so appends stay O(1), and account the added bytes in the
 * head skb's len/data_len/truesize.
 */
3699 static void append_skb_frag(struct sk_buff *skb,
3700 struct sk_buff *new_frag, struct sk_buff **last_frag)
3702 /* skb->len reflects data in skb as well as all fragments
3703 * skb->data_len reflects only data in fragments
/* first fragment starts the frag_list; later ones chain off the tail */
3705 if (!skb_has_frag_list(skb))
3706 skb_shinfo(skb)->frag_list = new_frag;
3708 new_frag->next = NULL;
3710 (*last_frag)->next = new_frag;
3711 *last_frag = new_frag;
3713 skb->len += new_frag->len;
3714 skb->data_len += new_frag->len;
3715 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in @control:
 *  - UNSEGMENTED: deliver the skb to the channel owner directly;
 *  - START: read the 16-bit SDU length, reject oversize SDUs against imtu,
 *    and begin collecting fragments (chan->sdu / sdu_last_frag);
 *  - CONTINUE: append to the partial SDU, guarding against overshooting
 *    the declared length;
 *  - END (fall-through section below): append the final fragment, require
 *    the total to match sdu_len exactly, then deliver the assembled SDU.
 * The trailing cleanup path frees a partial SDU on error.  Several guard
 * and return lines are missing from this excerpt.
 */
3718 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3722 switch (__get_ctrl_sar(chan, control)) {
3723 case L2CAP_SAR_UNSEGMENTED:
3727 err = chan->ops->recv(chan->data, skb);
3730 case L2CAP_SAR_START:
3734 chan->sdu_len = get_unaligned_le16(skb->data);
3735 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* declared SDU larger than our MTU: reject */
3737 if (chan->sdu_len > chan->imtu) {
3742 if (skb->len >= chan->sdu_len)
3746 chan->sdu_last_frag = skb;
3752 case L2CAP_SAR_CONTINUE:
3756 append_skb_frag(chan->sdu, skb,
3757 &chan->sdu_last_frag);
/* continuation must not reach or exceed the declared length */
3760 if (chan->sdu->len >= chan->sdu_len)
3770 append_skb_frag(chan->sdu, skb,
3771 &chan->sdu_last_frag);
/* END frame: total must match the length announced in START */
3774 if (chan->sdu->len != chan->sdu_len)
3777 err = chan->ops->recv(chan->data, chan->sdu);
3780 /* Reassembly complete */
3782 chan->sdu_last_frag = NULL;
/* error path: drop any partially assembled SDU */
3790 kfree_skb(chan->sdu);
3792 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: set CONN_LOCAL_BUSY and arm the ack
 * timer so an RNR will be sent to throttle the peer.
 */
3799 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3801 BT_DBG("chan %p, Enter local busy", chan);
3803 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3805 __set_ack_timer(chan);
/* Leave the ERTM local-busy state.  If we previously advertised RNR, send
 * an RR with the poll bit set and start the monitor timer (WAIT_F) so the
 * peer's final response can restart retransmission cleanly; then clear the
 * busy/RNR flags.
 */
3808 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* if no RNR was ever sent, there is nothing to retract on the air */
3812 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3815 control = __set_reqseq(chan, chan->buffer_seq);
3816 control |= __set_ctrl_poll(chan);
3817 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3818 l2cap_send_sframe(chan, control);
3819 chan->retry_count = 1;
3821 __clear_retrans_timer(chan);
3822 __set_monitor_timer(chan);
3824 set_bit(CONN_WAIT_F, &chan->conn_state);
3827 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3828 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3830 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle receive-side busy state on an
 * ERTM channel (no-op for other modes, per the mode check).
 */
3833 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3835 if (chan->mode == L2CAP_MODE_ERTM) {
3837 l2cap_ertm_enter_local_busy(chan);
3839 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrived, drain consecutive in-order frames from
 * the SREJ queue starting at @tx_seq, reassembling each one, until either
 * the queue's head is out of sequence again or we become locally busy.
 * A reassembly failure tears the connection down.
 */
3843 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3845 struct sk_buff *skb;
3848 while ((skb = skb_peek(&chan->srej_q)) &&
3849 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* queue head is not the next expected frame: gap remains */
3852 if (bt_cb(skb)->tx_seq != tx_seq)
3855 skb = skb_dequeue(&chan->srej_q);
3856 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3857 err = l2cap_reassemble_sdu(chan, skb, control);
3860 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3864 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3865 tx_seq = __next_seq(chan, tx_seq);
/* Walk the pending-SREJ list up to @tx_seq: entries up to and including it
 * are satisfied (the frame arrived), while every entry after it gets its
 * SREJ S-frame re-sent and is moved to the list tail to await the frame.
 */
3869 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3871 struct srej_list *l, *tmp;
3874 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* reached the sequence that just arrived: done */
3875 if (l->tx_seq == tx_seq) {
3880 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3881 control |= __set_reqseq(chan, l->tx_seq);
3882 l2cap_send_sframe(chan, control);
/* re-queue at the tail: still waiting for this frame */
3884 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame for every sequence number between expected_tx_seq
 * and the just-received @tx_seq, recording each in the srej_l list, then
 * advance expected_tx_seq past @tx_seq.  Allocation is GFP_ATOMIC (softirq
 * context); the allocation-failure return is on a line not visible here.
 */
3888 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3890 struct srej_list *new;
3893 while (tx_seq != chan->expected_tx_seq) {
3894 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3895 control |= __set_reqseq(chan, chan->expected_tx_seq);
3896 l2cap_send_sframe(chan, control);
3898 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3902 new->tx_seq = chan->expected_tx_seq;
3904 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3906 list_add_tail(&new->list, &chan->srej_l);
/* skip over the frame that actually arrived */
3909 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM receive path for an I-frame.
 *
 * Order of business: honour an F-bit while in WAIT_F; ack outstanding
 * frames via the piggybacked req_seq; validate tx_seq against the receive
 * window; short-circuit when locally busy; then either
 *  - in-sequence frame: (expected_seq path further below) queue/reassemble
 *    and ack every tx_win/6+1 frames, else arm the ack timer;
 *  - SREJ recovery in progress: slot the frame into the SREJ queue, close
 *    gaps, or re-issue SREJs for still-missing frames;
 *  - fresh gap: enter SREJ_SENT state, initialise the SREJ bookkeeping,
 *    and request the missing frames.
 * Failures (bad seq, srej alloc error, reassembly error) disconnect.
 * Many brace/goto/return lines are missing from this excerpt; comments
 * below annotate only the visible statements.
 */
3914 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3916 u16 tx_seq = __get_txseq(chan, rx_control);
3917 u16 req_seq = __get_reqseq(chan, rx_control);
3918 u8 sar = __get_ctrl_sar(chan, rx_control);
3919 int tx_seq_offset, expected_tx_seq_offset;
/* ack threshold: one ack per ~tx_win/6 frames */
3920 int num_to_ack = (chan->tx_win/6) + 1;
3923 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3924 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor, resume retransmit */
3926 if (__is_ctrl_final(chan, rx_control) &&
3927 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3928 __clear_monitor_timer(chan);
3929 if (chan->unacked_frames > 0)
3930 __set_retrans_timer(chan);
3931 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* piggybacked acknowledgement */
3934 chan->expected_ack_seq = req_seq;
3935 l2cap_drop_acked_frames(chan);
3937 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3939 /* invalid tx_seq */
3940 if (tx_seq_offset >= chan->tx_win) {
3941 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3945 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3946 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3947 l2cap_send_ack(chan);
3951 if (tx_seq == chan->expected_tx_seq)
3954 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3955 struct srej_list *first;
3957 first = list_first_entry(&chan->srej_l,
3958 struct srej_list, list);
/* the oldest missing frame just arrived: close the gap */
3959 if (tx_seq == first->tx_seq) {
3960 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3961 l2cap_check_srej_gap(chan, tx_seq);
3963 list_del(&first->list);
/* all gaps closed: leave SREJ recovery */
3966 if (list_empty(&chan->srej_l)) {
3967 chan->buffer_seq = chan->buffer_seq_srej;
3968 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3969 l2cap_send_ack(chan);
3970 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3973 struct srej_list *l;
3975 /* duplicated tx_seq */
3976 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3979 list_for_each_entry(l, &chan->srej_l, list) {
3980 if (l->tx_seq == tx_seq) {
3981 l2cap_resend_srejframe(chan, tx_seq);
3986 err = l2cap_send_srejframe(chan, tx_seq);
3988 l2cap_send_disconn_req(chan->conn, chan, -err);
3993 expected_tx_seq_offset = __seq_offset(chan,
3994 chan->expected_tx_seq, chan->buffer_seq);
3996 /* duplicated tx_seq */
3997 if (tx_seq_offset < expected_tx_seq_offset)
/* first out-of-sequence frame: start SREJ recovery */
4000 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4002 BT_DBG("chan %p, Enter SREJ", chan);
4004 INIT_LIST_HEAD(&chan->srej_l);
4005 chan->buffer_seq_srej = chan->buffer_seq;
4007 __skb_queue_head_init(&chan->srej_q);
4008 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4010 /* Set P-bit only if there are some I-frames to ack. */
4011 if (__clear_ack_timer(chan))
4012 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4014 err = l2cap_send_srejframe(chan, tx_seq);
4016 l2cap_send_disconn_req(chan->conn, chan, -err);
/* in-sequence arrival (expected_seq label path) */
4023 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* during SREJ recovery even in-order frames are buffered, not delivered */
4025 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4026 bt_cb(skb)->tx_seq = tx_seq;
4027 bt_cb(skb)->sar = sar;
4028 __skb_queue_tail(&chan->srej_q, skb);
4032 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4033 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4036 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4040 if (__is_ctrl_final(chan, rx_control)) {
4041 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4042 l2cap_retransmit_frames(chan);
/* batch acknowledgements to cut S-frame traffic */
4046 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4047 if (chan->num_acked == num_to_ack - 1)
4048 l2cap_send_ack(chan);
4050 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: acknowledge frames up to
 * req_seq, then
 *  - P=1: answer the poll with F=1 — srej tail while in SREJ recovery,
 *    otherwise I-frames/RR/RNR as appropriate;
 *  - F=1: leave remote-busy and retransmit if no REJ was outstanding;
 *  - plain RR: restart the retransmit timer if the peer had been busy and
 *    frames are unacked, else clear remote-busy and push pending I-frames.
 */
4059 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4061 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4062 __get_reqseq(chan, rx_control), rx_control);
4064 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4065 l2cap_drop_acked_frames(chan);
4067 if (__is_ctrl_poll(chan, rx_control)) {
/* peer polled us: our next response must carry the F-bit */
4068 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4069 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4070 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4071 (chan->unacked_frames > 0))
4072 __set_retrans_timer(chan);
4074 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4075 l2cap_send_srejtail(chan);
4077 l2cap_send_i_or_rr_or_rnr(chan);
4080 } else if (__is_ctrl_final(chan, rx_control)) {
4081 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* only retransmit here if a REJ did not already trigger it */
4083 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4084 l2cap_retransmit_frames(chan);
4087 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4088 (chan->unacked_frames > 0))
4089 __set_retrans_timer(chan);
4091 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4092 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4093 l2cap_send_ack(chan);
4095 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from req_seq
 * on.  Acknowledge up to req_seq, then retransmit — when F=1, only if a
 * REJ action was not already pending; otherwise retransmit immediately
 * and, while awaiting a final (WAIT_F), remember the REJ so the final's
 * arrival does not retransmit a second time.
 */
4099 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4101 u16 tx_seq = __get_reqseq(chan, rx_control);
4103 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4105 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4107 chan->expected_ack_seq = tx_seq;
4108 l2cap_drop_acked_frames(chan);
4110 if (__is_ctrl_final(chan, rx_control)) {
4111 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4112 l2cap_retransmit_frames(chan);
4114 l2cap_retransmit_frames(chan);
4116 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4117 set_bit(CONN_REJ_ACT, &chan->conn_state);
4120 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4122 u16 tx_seq = __get_reqseq(chan, rx_control);
4124 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4126 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4128 if (__is_ctrl_poll(chan, rx_control)) {
4129 chan->expected_ack_seq = tx_seq;
4130 l2cap_drop_acked_frames(chan);
4132 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4133 l2cap_retransmit_one_frame(chan, tx_seq);
4135 l2cap_ertm_send(chan);
4137 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4138 chan->srej_save_reqseq = tx_seq;
4139 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4141 } else if (__is_ctrl_final(chan, rx_control)) {
4142 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4143 chan->srej_save_reqseq == tx_seq)
4144 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4146 l2cap_retransmit_one_frame(chan, tx_seq);
4148 l2cap_retransmit_one_frame(chan, tx_seq);
4149 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4150 chan->srej_save_reqseq = tx_seq;
4151 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the peer busy,
 * acknowledge up to req_seq, and answer any poll.  Outside SREJ recovery
 * the retransmit timer is stopped (nothing may be sent to a busy peer)
 * and a polled RNR gets an RR/RNR with F=1; during SREJ recovery a poll
 * is answered with the SREJ tail, otherwise with a plain RR.
 */
4156 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4158 u16 tx_seq = __get_reqseq(chan, rx_control);
4160 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4162 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4163 chan->expected_ack_seq = tx_seq;
4164 l2cap_drop_acked_frames(chan);
4166 if (__is_ctrl_poll(chan, rx_control))
4167 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4169 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4170 __clear_retrans_timer(chan);
4171 if (__is_ctrl_poll(chan, rx_control))
4172 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4176 if (__is_ctrl_poll(chan, rx_control)) {
4177 l2cap_send_srejtail(chan);
4179 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4180 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame by its supervisory function (RR/REJ/SREJ/
 * RNR).  Before dispatching, an F-bit received while in WAIT_F stops the
 * monitor timer and re-arms the retransmit timer if frames are still
 * unacknowledged.  The skb itself carries no payload and is freed on a
 * line not visible in this excerpt.
 */
4184 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4186 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4188 if (__is_ctrl_final(chan, rx_control) &&
4189 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4190 __clear_monitor_timer(chan);
4191 if (chan->unacked_frames > 0)
4192 __set_retrans_timer(chan);
4193 clear_bit(CONN_WAIT_F, &chan->conn_state);
4196 switch (__get_ctrl_super(chan, rx_control)) {
4197 case L2CAP_SUPER_RR:
4198 l2cap_data_channel_rrframe(chan, rx_control);
4201 case L2CAP_SUPER_REJ:
4202 l2cap_data_channel_rejframe(chan, rx_control);
4205 case L2CAP_SUPER_SREJ:
4206 l2cap_data_channel_srejframe(chan, rx_control);
4209 case L2CAP_SUPER_RNR:
4210 l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate and route one received ERTM PDU.  The control field is pulled
 * off the skb, the FCS is checked (bad frames are silently dropped so the
 * ERTM recovery machinery requests retransmission), the payload length is
 * checked against the negotiated MPS, and req_seq is checked against the
 * range of frames we actually have in flight.  Valid frames go to the
 * I-frame or S-frame handler; each violation disconnects with ECONNRESET.
 */
4218 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4222 int len, next_tx_seq_offset, req_seq_offset;
4224 control = __get_control(chan, skb->data);
4225 skb_pull(skb, __ctrl_size(chan));
4229 * We can just drop the corrupted I-frame here.
4230 * Receiver will miss it and start proper recovery
4231 * procedures and ask retransmission.
4233 if (l2cap_check_fcs(chan, skb))
/* SDU-length prefix of a START frame is not payload */
4236 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4237 len -= L2CAP_SDULEN_SIZE;
4239 if (chan->fcs == L2CAP_FCS_CRC16)
4240 len -= L2CAP_FCS_SIZE;
/* payload exceeds the negotiated maximum PDU size */
4242 if (len > chan->mps) {
4243 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4247 req_seq = __get_reqseq(chan, control);
4249 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4251 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4252 chan->expected_ack_seq);
4254 /* check for invalid req-seq */
4255 if (req_seq_offset > next_tx_seq_offset) {
4256 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4260 if (!__is_sframe(chan, control)) {
/* I-frame with zero/negative payload after the adjustments above */
4262 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4266 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame carrying payload is a protocol violation */
4270 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4274 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data PDU to the channel identified by @cid.  Unknown CIDs and
 * channels not in BT_CONNECTED drop the packet.  Handling then depends on
 * the channel mode:
 *  - BASIC: MTU check, then hand the skb to the owner (no flow control —
 *    overflow means data loss, as the comment below concedes);
 *  - ERTM: full reliable-mode receive path (l2cap_ertm_data_rcv);
 *  - STREAMING: strip control/FCS, validate length, discard any partial
 *    SDU when a sequence gap is seen, then reassemble; an oversized SDU
 *    (-EMSGSIZE) disconnects the channel.
 */
4284 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4286 struct l2cap_chan *chan;
4291 chan = l2cap_get_chan_by_scid(conn, cid);
4293 BT_DBG("unknown cid 0x%4.4x", cid);
4294 /* Drop packet and return */
4299 l2cap_chan_lock(chan);
4301 BT_DBG("chan %p, len %d", chan, skb->len);
4303 if (chan->state != BT_CONNECTED)
4306 switch (chan->mode) {
4307 case L2CAP_MODE_BASIC:
4308 /* If socket recv buffers overflows we drop data here
4309 * which is *bad* because L2CAP has to be reliable.
4310 * But we don't have any other choice. L2CAP doesn't
4311 * provide flow control mechanism. */
4313 if (chan->imtu < skb->len)
4316 if (!chan->ops->recv(chan->data, skb))
4320 case L2CAP_MODE_ERTM:
4321 l2cap_ertm_data_rcv(chan, skb);
4325 case L2CAP_MODE_STREAMING:
4326 control = __get_control(chan, skb->data);
4327 skb_pull(skb, __ctrl_size(chan));
4330 if (l2cap_check_fcs(chan, skb))
4333 if (__is_sar_start(chan, control))
4334 len -= L2CAP_SDULEN_SIZE;
4336 if (chan->fcs == L2CAP_FCS_CRC16)
4337 len -= L2CAP_FCS_SIZE;
/* streaming mode carries no S-frames; bad length also drops */
4339 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4342 tx_seq = __get_txseq(chan, control);
4344 if (chan->expected_tx_seq != tx_seq) {
4345 /* Frame(s) missing - must discard partial SDU */
4346 kfree_skb(chan->sdu);
4348 chan->sdu_last_frag = NULL;
4351 /* TODO: Notify userland of missing data */
4354 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4356 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4357 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4362 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4370 l2cap_chan_unlock(chan);
/* Deliver a connectionless (PSM-addressed) PDU: find a global channel
 * bound to @psm on our source address, require it to be BOUND or
 * CONNECTED, enforce the MTU, and hand the skb to the owner.
 */
4375 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4377 struct l2cap_chan *chan;
4379 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4383 BT_DBG("chan %p, len %d", chan, skb->len);
4385 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4388 if (chan->imtu < skb->len)
4391 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE fixed-channel (ATT) PDU: find the global channel bound to
 * @cid on our source address, require BOUND or CONNECTED state, enforce
 * the MTU, and hand the skb to the owner.
 */
4400 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4401 struct sk_buff *skb)
4403 struct l2cap_chan *chan;
4405 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4409 BT_DBG("chan %p, len %d", chan, skb->len);
4411 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4414 if (chan->imtu < skb->len)
4417 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a reassembled L2CAP frame: strip the basic
 * header, verify the advertised length matches the skb, then route by CID
 * — signaling (BR/EDR or LE), connectionless (PSM read from the payload),
 * LE data/ATT, SMP (tearing the connection down on an SMP failure), and
 * finally ordinary connection-oriented data channels.
 */
4426 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4428 struct l2cap_hdr *lh = (void *) skb->data;
4432 skb_pull(skb, L2CAP_HDR_SIZE);
4433 cid = __le16_to_cpu(lh->cid);
4434 len = __le16_to_cpu(lh->len);
/* length mismatch: corrupted frame, drop it */
4436 if (len != skb->len) {
4441 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4444 case L2CAP_CID_LE_SIGNALING:
4445 case L2CAP_CID_SIGNALING:
4446 l2cap_sig_channel(conn, skb);
4449 case L2CAP_CID_CONN_LESS:
/* connectionless frames carry the PSM as the first payload bytes */
4450 psm = get_unaligned((__le16 *) skb->data);
4452 l2cap_conless_channel(conn, psm, skb);
4455 case L2CAP_CID_LE_DATA:
4456 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure drops the whole link */
4460 if (smp_sig_channel(conn, skb))
4461 l2cap_conn_del(conn->hcon, EACCES);
4465 l2cap_data_channel(conn, cid, skb);
4470 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection from @bdaddr is being offered.
 * Scan the global channel list for listening channels and build the
 * accept/master link-mode mask — lm1 for channels bound exactly to this
 * adapter's address, lm2 for wildcard (BDADDR_ANY) listeners.  Returns
 * the exact-match mask when one existed, else the wildcard mask.
 * NOTE(review): the line incrementing "exact" on an address match is
 * missing from this excerpt — presumably next to the lm1 branch; confirm.
 */
4472 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4474 int exact = 0, lm1 = 0, lm2 = 0;
4475 struct l2cap_chan *c;
4477 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4479 /* Find listening sockets and check their link_mode */
4480 read_lock(&chan_list_lock);
4481 list_for_each_entry(c, &chan_list, global_l) {
4482 struct sock *sk = c->sk;
4484 if (c->state != BT_LISTEN)
4487 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4488 lm1 |= HCI_LM_ACCEPT;
4489 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4490 lm1 |= HCI_LM_MASTER;
4492 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4493 lm2 |= HCI_LM_ACCEPT;
4494 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4495 lm2 |= HCI_LM_MASTER;
4498 read_unlock(&chan_list_lock);
4500 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection completed with
 * @status.  On success, create/attach the L2CAP connection object and
 * mark it ready; on failure, tear down any L2CAP state with the HCI
 * status translated to an errno.
 */
4503 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4505 struct l2cap_conn *conn;
4507 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4510 conn = l2cap_conn_add(hcon, status);
4512 l2cap_conn_ready(conn);
4514 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking which disconnect reason to report for @hcon.
 * Returns the reason recorded on the L2CAP connection, or the generic
 * remote-user-terminated code when no L2CAP state exists.
 */
4519 int l2cap_disconn_ind(struct hci_conn *hcon)
4521 struct l2cap_conn *conn = hcon->l2cap_data;
4523 BT_DBG("hcon %p", hcon);
4526 return HCI_ERROR_REMOTE_USER_TERM;
4527 return conn->disc_reason;
/* HCI callback: the ACL link is gone — tear down all L2CAP state for it,
 * translating the HCI reason to an errno.
 */
4530 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4532 BT_DBG("hcon %p reason %d", hcon, reason);
4534 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropping (0x00): MEDIUM-security channels get a grace timer
 * (L2CAP_ENC_TIMEOUT) before being torn down, HIGH-security channels are
 * closed immediately.  Encryption coming up: cancel the pending grace
 * timer on MEDIUM-security channels.
 */
4538 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4540 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4543 if (encrypt == 0x00) {
4544 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4545 __clear_chan_timer(chan);
4546 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4547 } else if (chan->sec_level == BT_SECURITY_HIGH)
4548 l2cap_chan_close(chan, ECONNREFUSED);
4550 if (chan->sec_level == BT_SECURITY_MEDIUM)
4551 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) change callback.
 *
 * For LE links, hand over to SMP key distribution and cancel the
 * security timer.  For BR/EDR, walk every channel on the connection
 * (under chan_lock, each channel individually locked) and advance its
 * state machine according to the new security result:
 *   - LE data channels become ready once encrypted;
 *   - channels in BT_CONNECT (re)send the connection request or are put
 *     on the disconnect timer on failure;
 *   - channels in BT_CONNECT2 answer the peer's pending connection
 *     request with success, authorization-pending (deferred setup), or
 *     security-block.
 *
 * NOTE(review): this extraction is missing many original lines
 * (declarations of `res`/`stat`, `if (!status)` guards, `continue`s,
 * braces).  Comments below annotate only the visible statements; the
 * exact branch structure must be confirmed against the full source.
 */
4555 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4557 struct l2cap_conn *conn = hcon->l2cap_data;
4558 struct l2cap_chan *chan;
4563 BT_DBG("conn %p", conn);
/* LE link: security is SMP's business, not the BR/EDR path below. */
4565 if (hcon->type == LE_LINK) {
4566 smp_distribute_keys(conn, 0);
4567 cancel_delayed_work(&conn->security_timer);
4570 mutex_lock(&conn->chan_lock);
4572 list_for_each_entry(chan, &conn->chan_l, list) {
4573 l2cap_chan_lock(chan);
4575 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: ready as soon as the link is encrypted. */
4577 if (chan->scid == L2CAP_CID_LE_DATA) {
4578 if (!status && encrypt) {
4579 chan->sec_level = hcon->sec_level;
4580 l2cap_chan_ready(chan);
4583 l2cap_chan_unlock(chan);
/* Channel with a pending connect of its own: leave it alone here. */
4587 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4588 l2cap_chan_unlock(chan);
/* Already-established channel: just react to the encryption state. */
4592 if (!status && (chan->state == BT_CONNECTED ||
4593 chan->state == BT_CONFIG)) {
4594 l2cap_check_encryption(chan, encrypt);
4595 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: send the request on success,
 * otherwise arm the disconnect timer (failure branch partly missing). */
4599 if (chan->state == BT_CONNECT) {
4601 l2cap_send_conn_req(chan);
4603 __clear_chan_timer(chan);
4604 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect held for security: build the deferred response. */
4606 } else if (chan->state == BT_CONNECT2) {
4607 struct sock *sk = chan->sk;
4608 struct l2cap_conn_rsp rsp;
/* Deferred setup: keep the peer pending and wake the listener. */
4614 if (bt_sk(sk)->defer_setup) {
4615 struct sock *parent = bt_sk(sk)->parent;
4616 res = L2CAP_CR_PEND;
4617 stat = L2CAP_CS_AUTHOR_PEND;
4619 parent->sk_data_ready(parent, 0);
/* Security passed: proceed to configuration. */
4621 __l2cap_state_change(chan, BT_CONFIG);
4622 res = L2CAP_CR_SUCCESS;
4623 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and schedule teardown. */
4626 __l2cap_state_change(chan, BT_DISCONN);
4627 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4628 res = L2CAP_CR_SEC_BLOCK;
4629 stat = L2CAP_CS_NO_INFO;
/* Answer the peer's CONN_REQ; note dcid/scid are swapped because the
 * response carries OUR view of the peer's identifiers. */
4634 rsp.scid = cpu_to_le16(chan->dcid);
4635 rsp.dcid = cpu_to_le16(chan->scid);
4636 rsp.result = cpu_to_le16(res);
4637 rsp.status = cpu_to_le16(stat);
4638 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4642 l2cap_chan_unlock(chan);
4645 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from HCI: reassemble L2CAP PDUs that were
 * fragmented across several ACL packets.
 *
 * A start fragment (!ACL_CONT) carries the basic L2CAP header, from
 * which the total PDU length is read.  If the fragment already holds
 * the whole PDU it is dispatched directly via l2cap_recv_frame();
 * otherwise a reassembly buffer (conn->rx_skb) is allocated and
 * conn->rx_len tracks the bytes still expected.  Continuation
 * fragments (ACL_CONT) are appended until rx_len reaches zero, then
 * the completed PDU is dispatched.
 *
 * NOTE(review): fragmentary extraction -- `drop:` labels, `return`
 * statements, allocation-failure handling and several guards are on
 * missing lines; comments annotate only the visible statements.
 */
4650 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4652 struct l2cap_conn *conn = hcon->l2cap_data;
/* No conn yet: create one on the fly (NULL-check line is missing). */
4655 conn = l2cap_conn_add(hcon, 0);
4660 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4662 if (!(flags & ACL_CONT)) {
4663 struct l2cap_hdr *hdr;
4664 struct l2cap_chan *chan;
/* A start fragment while reassembly is in progress means the previous
 * PDU was truncated: drop the partial buffer and flag the link. */
4669 BT_ERR("Unexpected start frame (len %d)", skb->len);
4670 kfree_skb(conn->rx_skb);
4671 conn->rx_skb = NULL;
4673 l2cap_conn_unreliable(conn, ECOMM);
4676 /* Start fragment always begin with Basic L2CAP header */
4677 if (skb->len < L2CAP_HDR_SIZE) {
4678 BT_ERR("Frame is too short (len %d)", skb->len);
4679 l2cap_conn_unreliable(conn, ECOMM);
4683 hdr = (struct l2cap_hdr *) skb->data;
/* Total PDU size = payload length from the header + the header. */
4684 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4685 cid = __le16_to_cpu(hdr->cid);
4687 if (len == skb->len) {
4688 /* Complete frame received */
4689 l2cap_recv_frame(conn, skb);
4693 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4695 if (skb->len > len) {
4696 BT_ERR("Frame is too long (len %d, expected len %d)",
4698 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the destination channel, so we never
 * allocate a reassembly buffer bigger than the receiver accepts. */
4702 chan = l2cap_get_chan_by_scid(conn, cid);
4704 if (chan && chan->sk) {
4705 struct sock *sk = chan->sk;
4708 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4709 BT_ERR("Frame exceeding recv MTU (len %d, "
4713 l2cap_conn_unreliable(conn, ECOMM);
4719 /* Allocate skb for the complete frame (with header) */
4720 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4724 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4726 conn->rx_len = len - skb->len;
/* --- continuation fragment path (ACL_CONT) --- */
4728 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4730 if (!conn->rx_len) {
4731 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4732 l2cap_conn_unreliable(conn, ECOMM);
4736 if (skb->len > conn->rx_len) {
4737 BT_ERR("Fragment is too long (len %d, expected %d)",
4738 skb->len, conn->rx_len);
4739 kfree_skb(conn->rx_skb);
4740 conn->rx_skb = NULL;
4742 l2cap_conn_unreliable(conn, ECOMM);
4746 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4748 conn->rx_len -= skb->len;
4750 if (!conn->rx_len) {
4751 /* Complete frame received */
/* Ownership of rx_skb passes to l2cap_recv_frame(). */
4752 l2cap_recv_frame(conn, conn->rx_skb);
4753 conn->rx_skb = NULL;
/* seq_file show callback for the "l2cap" debugfs entry: dump one line
 * per global channel (src dst state psm scid dcid imtu omtu sec_level
 * mode) while holding the channel-list read lock.
 */
4762 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4764 struct l2cap_chan *c;
4766 read_lock(&chan_list_lock);
4768 list_for_each_entry(c, &chan_list, global_l) {
4769 struct sock *sk = c->sk;
4771 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4772 batostr(&bt_sk(sk)->src),
4773 batostr(&bt_sk(sk)->dst),
/* PSM is stored little-endian on the wire; convert for display. */
4774 c->state, __le16_to_cpu(c->psm),
4775 c->scid, c->dcid, c->imtu, c->omtu,
4776 c->sec_level, c->mode);
4779 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the show function via single_open(). */
4784 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4786 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs entry; standard single_open/seq_file
 * pattern.  NOTE(review): the `.read = seq_read` member appears to sit
 * on a line missing from this extraction. */
4789 static const struct file_operations l2cap_debugfs_fops = {
4790 .open = l2cap_debugfs_open,
4792 .llseek = seq_lseek,
4793 .release = single_release,
/* Handle to the debugfs file so l2cap_exit() can remove it. */
4796 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then create the
 * read-only (0444) "l2cap" debugfs file.  A debugfs failure is only
 * logged, not fatal (error-propagation lines are missing from this
 * extraction).
 */
4798 int __init l2cap_init(void)
4802 err = l2cap_init_sockets();
4807 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4808 bt_debugfs, NULL, &l2cap_debugfs_fops);
4810 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: mirror of l2cap_init() in reverse order. */
4816 void l2cap_exit(void)
4818 debugfs_remove(l2cap_debugfs);
4819 l2cap_cleanup_sockets();
/* Module parameter: allow ERTM (enhanced retransmission mode) to be
 * disabled at load time or via sysfs (mode 0644). */
4822 module_param(disable_ertm, bool, 0644);
4823 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");