Bluetooth: Send SREJ frames when packets go missing
[linux-2.6-block.git] / net / bluetooth / l2cap_core.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27*/
28
29/* Bluetooth L2CAP core. */
30
31#include <linux/module.h>
32
33#include <linux/types.h>
34#include <linux/capability.h>
35#include <linux/errno.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/poll.h>
40#include <linux/fcntl.h>
41#include <linux/init.h>
42#include <linux/interrupt.h>
43#include <linux/socket.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/device.h>
47#include <linux/debugfs.h>
48#include <linux/seq_file.h>
49#include <linux/uaccess.h>
50#include <linux/crc16.h>
51#include <net/sock.h>
52
53#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
58#include <net/bluetooth/smp.h>
59
60bool disable_ertm = 1;
61
62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65static LIST_HEAD(chan_list);
66static DEFINE_RWLOCK(chan_list_lock);
67
68static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
77 struct sk_buff_head *skbs, u8 event);
78
79/* ---- L2CAP channels ---- */
80
81static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82{
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90}
91
92static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93{
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->scid == cid)
98 return c;
99 }
100 return NULL;
101}
102
/* Find channel with given SCID.
 * Takes conn->chan_lock only for the lookup; the returned channel is
 * locked (l2cap_chan_lock) so the caller must unlock it when done.
 * Returns NULL if no channel has the given SCID.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
117
118static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
119{
120 struct l2cap_chan *c;
121
122 list_for_each_entry(c, &conn->chan_l, list) {
123 if (c->ident == ident)
124 return c;
125 }
126 return NULL;
127}
128
129static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
130{
131 struct l2cap_chan *c;
132
133 list_for_each_entry(c, &chan_list, global_l) {
134 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 return c;
136 }
137 return NULL;
138}
139
140int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141{
142 int err;
143
144 write_lock(&chan_list_lock);
145
146 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
147 err = -EADDRINUSE;
148 goto done;
149 }
150
151 if (psm) {
152 chan->psm = psm;
153 chan->sport = psm;
154 err = 0;
155 } else {
156 u16 p;
157
158 err = -EINVAL;
159 for (p = 0x1001; p < 0x1100; p += 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
161 chan->psm = cpu_to_le16(p);
162 chan->sport = cpu_to_le16(p);
163 err = 0;
164 break;
165 }
166 }
167
168done:
169 write_unlock(&chan_list_lock);
170 return err;
171}
172
173int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
174{
175 write_lock(&chan_list_lock);
176
177 chan->scid = scid;
178
179 write_unlock(&chan_list_lock);
180
181 return 0;
182}
183
184static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
185{
186 u16 cid = L2CAP_CID_DYN_START;
187
188 for (; cid < L2CAP_CID_DYN_END; cid++) {
189 if (!__l2cap_get_chan_by_scid(conn, cid))
190 return cid;
191 }
192
193 return 0;
194}
195
/* Move @chan to @state and notify the channel's owner via the
 * state_change callback.  Lockless variant: callers take the socket
 * lock themselves (see l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
204
/* Move @chan to @state with the backing socket locked. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
213
214static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
215{
216 struct sock *sk = chan->sk;
217
218 sk->sk_err = err;
219}
220
/* Record @err on the channel's backing socket, taking the socket
 * lock around the update.
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
229
230static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 u16 seq)
232{
233 struct sk_buff *skb;
234
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
237 return skb;
238 }
239
240 return NULL;
241}
242
243/* ---- L2CAP sequence number lists ---- */
244
245/* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
254static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255{
256 size_t alloc_size, i;
257
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
261 */
262 alloc_size = roundup_pow_of_two(size);
263
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 if (!seq_list->list)
266 return -ENOMEM;
267
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273
274 return 0;
275}
276
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* O(1) membership test: a slot that is not CLEAR holds either the next
 * sequence number in the list or the TAIL sentinel, so non-CLEAR means
 * @seq is currently on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
						u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Remove @seq from the list.  Removing the head is O(1); removing an
 * interior element walks the singly-linked chain to find @seq's
 * predecessor.  Returns @seq on success, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty or @seq is not found.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Pop and return the head of the list in O(1); returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
329static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330{
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341}
342
/* Append @seq to the tail of the list in O(1).  Duplicates are
 * ignored: a slot that is already non-CLEAR means @seq is on the
 * list and appending again would corrupt the chain.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: @seq becomes the head; otherwise chain it after
	 * the current tail.
	 */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler for chan_timer: closes the channel with an
 * errno derived from its current state.
 *
 * Lock order: conn->chan_lock is taken before the channel lock — the
 * same order used by l2cap_conn_start()/l2cap_conn_ready().
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Connected/configuring or a secured outgoing connect attempt
	 * report ECONNREFUSED; everything else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close is invoked without the channel lock held */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): presumably balances the reference taken when the
	 * timer was armed — confirm against __set_chan_timer().
	 */
	l2cap_chan_put(chan);
}
390
/* Allocate a new channel with one reference, register it on the
 * global chan_list, and initialise its timer and state.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC suggests callers may be in atomic
	 * context — confirm against call sites.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418
/* Unregister @chan from the global list and drop the creation
 * reference taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
428void l2cap_chan_set_defaults(struct l2cap_chan *chan)
429{
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
435
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
437}
438
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type and link kind, seed best-effort QoS defaults, take a channel
 * reference, and link the channel onto conn->chan_l.
 * NOTE(review): callers presumably hold conn->chan_lock (see
 * l2cap_chan_add()) — confirm for direct callers.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) local quality-of-service parameters */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
487
/* Attach @chan to @conn with conn->chan_lock held. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
494
/* Detach @chan from its connection and mark the backing socket
 * closed/zapped, reporting @err (if non-zero) to the socket.  For a
 * fully configured channel the ERTM timers, queues and sequence lists
 * are also torn down.
 * NOTE(review): callers presumably hold the channel lock — confirm.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drops the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	/* Wake whoever is waiting: the accept queue owner for a child
	 * socket, or the socket itself otherwise.
	 */
	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* Channels that never finished configuration have no ERTM
	 * resources to release.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
	}
}
547
/* Close every not-yet-accepted child channel still queued on the
 * listening socket @parent.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* ops->close is called without the channel lock held */
		chan->ops->close(chan->data);
	}
}
566
/* Close @chan according to its current state, reporting @reason to
 * the socket layer.  Established ACL channels get a graceful
 * disconnect request with a timer; half-open incoming connections
 * (BT_CONNECT2) first answer the pending connect request with a
 * rejection before being deleted.
 * NOTE(review): callers presumably hold the channel lock — confirm.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
			state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Graceful teardown: arm the timer and send a
			 * disconnect request; the channel is deleted
			 * when the response (or timeout) arrives.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Deferred-setup channels were held for
			 * authorization; report security block,
			 * otherwise reject with bad PSM.
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
					sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
630
631static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
632{
633 if (chan->chan_type == L2CAP_CHAN_RAW) {
634 switch (chan->sec_level) {
635 case BT_SECURITY_HIGH:
636 return HCI_AT_DEDICATED_BONDING_MITM;
637 case BT_SECURITY_MEDIUM:
638 return HCI_AT_DEDICATED_BONDING;
639 default:
640 return HCI_AT_NO_BONDING;
641 }
642 } else if (chan->psm == cpu_to_le16(0x0001)) {
643 if (chan->sec_level == BT_SECURITY_LOW)
644 chan->sec_level = BT_SECURITY_SDP;
645
646 if (chan->sec_level == BT_SECURITY_HIGH)
647 return HCI_AT_NO_BONDING_MITM;
648 else
649 return HCI_AT_NO_BONDING;
650 } else {
651 switch (chan->sec_level) {
652 case BT_SECURITY_HIGH:
653 return HCI_AT_GENERAL_BONDING_MITM;
654 case BT_SECURITY_MEDIUM:
655 return HCI_AT_GENERAL_BONDING;
656 default:
657 return HCI_AT_NO_BONDING;
658 }
659 }
660}
661
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* Must be computed before reading chan->sec_level below:
	 * l2cap_get_auth_type() may upgrade sec_level for the SDP PSM.
	 */
	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
672
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1..128 under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
694
695static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
696{
697 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
698 u8 flags;
699
700 BT_DBG("code 0x%2.2x", code);
701
702 if (!skb)
703 return;
704
705 if (lmp_no_flush_capable(conn->hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
707 else
708 flags = ACL_START;
709
710 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
711 skb->priority = HCI_PRIO_MAX;
712
713 hci_send_acl(conn->hchan, skb, flags);
714}
715
716static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
717{
718 struct hci_conn *hcon = chan->conn->hcon;
719 u16 flags;
720
721 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
722 skb->priority);
723
724 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
725 lmp_no_flush_capable(hcon->hdev))
726 flags = ACL_START_NO_FLUSH;
727 else
728 flags = ACL_START;
729
730 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
731 hci_send_acl(chan->conn->hchan, skb, flags);
732}
733
734static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
735{
736 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
737 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
738
739 if (enh & L2CAP_CTRL_FRAME_TYPE) {
740 /* S-Frame */
741 control->sframe = 1;
742 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
743 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
744
745 control->sar = 0;
746 control->txseq = 0;
747 } else {
748 /* I-Frame */
749 control->sframe = 0;
750 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
751 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
752
753 control->poll = 0;
754 control->super = 0;
755 }
756}
757
758static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
759{
760 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
761 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
762
763 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
764 /* S-Frame */
765 control->sframe = 1;
766 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
767 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
768
769 control->sar = 0;
770 control->txseq = 0;
771 } else {
772 /* I-Frame */
773 control->sframe = 0;
774 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
775 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
776
777 control->poll = 0;
778 control->super = 0;
779 }
780}
781
782static inline void __unpack_control(struct l2cap_chan *chan,
783 struct sk_buff *skb)
784{
785 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
786 __unpack_extended_control(get_unaligned_le32(skb->data),
787 &bt_cb(skb)->control);
788 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
789 } else {
790 __unpack_enhanced_control(get_unaligned_le16(skb->data),
791 &bt_cb(skb)->control);
792 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
793 }
794}
795
796static u32 __pack_extended_control(struct l2cap_ctrl *control)
797{
798 u32 packed;
799
800 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
801 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
802
803 if (control->sframe) {
804 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
805 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
806 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
807 } else {
808 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
809 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
810 }
811
812 return packed;
813}
814
815static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
816{
817 u16 packed;
818
819 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
820 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
821
822 if (control->sframe) {
823 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
824 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
825 packed |= L2CAP_CTRL_FRAME_TYPE;
826 } else {
827 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
828 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
829 }
830
831 return packed;
832}
833
/* Write @control into @skb's control field, which sits immediately
 * after the basic L2CAP header.  The field format (enhanced vs
 * extended) follows the channel's FLAG_EXT_CTRL flag.
 */
static inline void __pack_control(struct l2cap_chan *chan,
					struct l2cap_ctrl *control,
					struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
	}
}
846
/* Build a complete S-frame PDU for @chan carrying the packed
 * @control word: basic header, control field, and (if negotiated)
 * an FCS computed over everything after — and including — the
 * header.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
						u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen;

	/* hlen covers the L2CAP header plus the control field size */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	/* PDU length excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS is computed over header + control, then appended */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
884
/* Transmit the supervisory frame described by @control on @chan,
 * updating channel bookkeeping as a side effect:
 *  - a pending F-bit (CONN_SEND_FBIT) is consumed onto non-poll frames;
 *  - CONN_RNR_SENT tracks whether the peer was last told we are busy;
 *  - non-SREJ frames acknowledge reqseq, so the ack timer is cleared.
 * @control may be modified (final bit).  Non-S-frames are ignored.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A frame with the poll bit set must not also carry final */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
			!control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ does not acknowledge new frames, so it neither moves
	 * last_acked_seq nor satisfies the pending ack timer.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
			control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
922
923static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
924{
925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
926 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
927 set_bit(CONN_RNR_SENT, &chan->conn_state);
928 } else
929 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
930
931 control |= __set_reqseq(chan, chan->buffer_seq);
932}
933
/* True when no connect request is outstanding on @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
938
/* Send an L2CAP connect request for @chan, remembering the command
 * identifier in chan->ident and marking the request pending
 * (CONF_CONNECT_PEND, cleared when the response arrives).
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
953
/* Mark @chan fully connected: clear all configuration state and the
 * channel timer, move to BT_CONNECTED, and wake the socket (and the
 * accept queue owner for a child socket).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
977
/* Kick off connection establishment for @chan.  LE links need no
 * further signalling and become ready immediately.  On BR/EDR the
 * remote feature mask must be known first: if it has been fetched,
 * send the connect request (security permitting); otherwise issue an
 * information request and wait for its response.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Request already in flight — wait for the response */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1007
1008static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1009{
1010 u32 local_feat_mask = l2cap_feat_mask;
1011 if (!disable_ertm)
1012 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1013
1014 switch (mode) {
1015 case L2CAP_MODE_ERTM:
1016 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1017 case L2CAP_MODE_STREAMING:
1018 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1019 default:
1020 return 0x00;
1021 }
1022}
1023
/* Send a disconnect request for @chan, stop its ERTM timers, move it
 * to BT_DISCONN and record @err on the socket.  A no-op when the
 * channel has no connection.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* No more (re)transmissions once we are disconnecting */
	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1048
1049/* ---- L2CAP connections ---- */
/* Advance every connection-oriented channel on @conn once the link is
 * usable: BT_CONNECT channels send their connect request (or are
 * closed if their mode is unsupported and the device insists on it);
 * BT_CONNECT2 channels answer the pending incoming connect request
 * according to security/deferred-setup state, then start
 * configuration on success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below may delete entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait until security is settled and no request
			 * is already pending.
			 */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Mandatory-mode channels that the peer cannot
			 * support are torn down instead of falling back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
						&bt_sk(sk)->flags)) {
					/* Deferred setup: report pending
					 * until userspace authorizes.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
						sizeof(rsp), &rsp);

			/* Only successful, not-yet-configured channels
			 * proceed to configuration.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1132
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match: an exact src/dst match wins immediately;
 * otherwise the last channel seen whose non-matching side is the
 * wildcard BDADDR_ANY is returned (may be NULL).
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
							bdaddr_t *src,
							bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
					(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1175
/* Handle an incoming LE connection: if a socket is listening on the
 * LE data CID, spawn a child channel, attach it to @conn, queue it on
 * the listener's accept queue and mark it connected.  Silently does
 * nothing when there is no listener, the backlog is full, or the
 * child cannot be created.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
						conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	/* Success path intentionally falls through: only the parent
	 * socket lock needs releasing.
	 */
clean:
	release_sock(parent);
}
1222
/* Called when the underlying HCI link is up: kick every channel on the
 * connection into its next state (security elevation for LE, immediate
 * connect for connectionless/raw channels, config start for the rest).
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE listener, if any */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP at the pending security level */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels become ready once security is in place */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP signalling */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1261
/* Notify sockets that we cannot guarantee reliability anymore */
1263static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1264{
1265 struct l2cap_chan *chan;
1266
1267 BT_DBG("conn %p", conn);
1268
1269 mutex_lock(&conn->chan_lock);
1270
1271 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err);
1274 }
1275
1276 mutex_unlock(&conn->chan_lock);
1277}
1278
/* Information request timed out: stop waiting for the remote's feature
 * mask response, mark discovery done anyway and start any channels that
 * were blocked on it.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1289
/* Tear down an L2CAP connection and every channel on it.
 * @err is propagated to each channel as the close reason.
 * The hold/lock ... unlock/close/put ordering per channel is deliberate:
 * the hold keeps the channel alive across chan_del, and ops->close may
 * sleep so it runs after the channel lock is dropped.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame (kfree_skb(NULL) is a no-op) */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Only cancel timers that could actually have been armed */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	/* Unpublish before freeing so hcon no longer points at freed memory */
	hcon->l2cap_data = NULL;
	kfree(conn);
}
1332
/* The SMP security procedure timed out: drop the whole connection */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1340
/* Create (or return the existing) L2CAP connection object for an HCI
 * link. Returns NULL on allocation failure, or the existing conn if one
 * is already attached. A non-zero @status means the link failed, so no
 * conn is created.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	/* May be called from atomic context (HCI event path) */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own, smaller MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* security_timer and info_timer share storage; only one is used,
	 * depending on the link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1389
1390/* ---- Socket interface ---- */
1391
/* Find a channel with the given psm and source/destination bdaddr.
 * Returns an exact match if one exists, otherwise the closest
 * wildcard match.
 */
1395static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1396 bdaddr_t *src,
1397 bdaddr_t *dst)
1398{
1399 struct l2cap_chan *c, *c1 = NULL;
1400
1401 read_lock(&chan_list_lock);
1402
1403 list_for_each_entry(c, &chan_list, global_l) {
1404 struct sock *sk = c->sk;
1405
1406 if (state && c->state != state)
1407 continue;
1408
1409 if (c->psm == psm) {
1410 int src_match, dst_match;
1411 int src_any, dst_any;
1412
1413 /* Exact match. */
1414 src_match = !bacmp(&bt_sk(sk)->src, src);
1415 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1416 if (src_match && dst_match) {
1417 read_unlock(&chan_list_lock);
1418 return c;
1419 }
1420
1421 /* Closest match */
1422 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1423 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1424 if ((src_match && dst_any) || (src_any && dst_match) ||
1425 (src_any && dst_any))
1426 c1 = c;
1427 }
1428 }
1429
1430 read_unlock(&chan_list_lock);
1431
1432 return c1;
1433}
1434
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 * @psm selects a connection-oriented service; @cid (if non-zero) selects
 * a fixed channel instead (LE data uses L2CAP_CID_LE_DATA).
 * Returns 0 on success or if a connect is already in progress, or a
 * negative errno. Lock order: hci_dev -> chan -> sock.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channels ride on an LE link; everything else uses ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single data channel is allowed on an LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add takes conn->chan_lock, so drop the channel lock
	 * around it to respect the lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1578
/* Block until every outstanding ERTM I-frame has been acknowledged by
 * the remote, or the connection goes away, or a signal/socket error
 * interrupts the wait. Called with the socket locked; the lock is
 * dropped around each sleep. Returns 0 or a negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Poll in HZ/5 slices rather than sleeping indefinitely */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* schedule_timeout() leaves us TASK_RUNNING; re-arm */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1610
/* ERTM monitor timer: the remote has not answered our poll. Either give
 * up (retry limit reached) or poll again. The timer work holds a channel
 * reference, dropped via l2cap_chan_put() on every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (chan->retry_count >= chan->remote_max_tx) {
		/* Remote is unresponsive: abort the channel */
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1634
/* ERTM retransmission timer: no ack arrived for our I-frames in time.
 * Switch to the monitor procedure: poll the remote with an RR/RNR and
 * wait for an F-bit response. Drops the channel ref held by the timer.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1654
1655static int l2cap_streaming_send(struct l2cap_chan *chan,
1656 struct sk_buff_head *skbs)
1657{
1658 struct sk_buff *skb;
1659 struct l2cap_ctrl *control;
1660
1661 BT_DBG("chan %p, skbs %p", chan, skbs);
1662
1663 if (chan->state != BT_CONNECTED)
1664 return -ENOTCONN;
1665
1666 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1667
1668 while (!skb_queue_empty(&chan->tx_q)) {
1669
1670 skb = skb_dequeue(&chan->tx_q);
1671
1672 bt_cb(skb)->control.retries = 1;
1673 control = &bt_cb(skb)->control;
1674
1675 control->reqseq = 0;
1676 control->txseq = chan->next_tx_seq;
1677
1678 __pack_control(chan, control, skb);
1679
1680 if (chan->fcs == L2CAP_FCS_CRC16) {
1681 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1682 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1683 }
1684
1685 l2cap_do_send(chan, skb);
1686
1687 BT_DBG("Sent txseq %d", (int)control->txseq);
1688
1689 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1690 chan->frames_sent++;
1691 }
1692
1693 return 0;
1694}
1695
/* Transmit as many queued ERTM I-frames as the remote's tx window
 * allows. Each frame is cloned before sending so the original stays in
 * tx_q for possible retransmission. Returns the number of frames sent,
 * 0 if the remote is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggy-back a pending F-bit on the outgoing frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1762
/* Retransmit every sequence number currently on chan->retrans_list.
 * Frames still live in tx_q; only the control field (reqseq/F-bit) is
 * rewritten in a writable copy before sending. Aborts the channel if a
 * frame exceeds max_tx retransmissions.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			/* Frame was acked and freed after being scheduled */
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack info and F-bit for this retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* The control field changed, so the FCS must be recomputed */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1838
/* Schedule retransmission of every unacked frame starting at
 * control->reqseq (a REJ or poll response asked for a full go-back-N).
 * Rebuilds retrans_list from tx_q and kicks l2cap_ertm_resend().
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A P-bit demands an F-bit in our next response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame being re-requested */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything already sent (up to tx_send_head) */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1872
/* Acknowledge received I-frames. Sends an RNR while locally busy,
 * otherwise tries to piggy-back the ack on outgoing data; an explicit
 * RR is sent only once the unacked span reaches 3/4 of the tx window,
 * else the ack timer is (re)armed to ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop with an RNR */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise defer: ack when the timer fires */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1922
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into HCI-MTU-sized continuation
 * fragments chained on skb's frag_list. Returns bytes copied or a
 * negative errno (partially attached frags are freed with the skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account fragment bytes in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1967
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload from @msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* First fragment carries the header; rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2001
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from @msg. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	/* First fragment carries the header; rest goes to frag_list */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2034
/* Build an ERTM/streaming I-frame PDU. @sdulen is non-zero only for the
 * first (SAR start) segment of a segmented SDU, in which case an SDU
 * length field is included. The control field is left zeroed and filled
 * in at transmit time. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2091
/* Split an SDU of @len bytes from @msg into one or more I-frame PDUs on
 * @seg_queue, setting the SAR bits (UNSEGMENTED, or START/CONTINUE/END)
 * on each. On any allocation failure the queue is purged and the errno
 * returned; on success returns 0.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The start segment also carries the SDU length field */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries the SDU length;
			 * later segments regain that room for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2157
2158int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2159 u32 priority)
2160{
2161 struct sk_buff *skb;
2162 int err;
2163 struct sk_buff_head seg_queue;
2164
2165 /* Connectionless channel */
2166 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2167 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2168 if (IS_ERR(skb))
2169 return PTR_ERR(skb);
2170
2171 l2cap_do_send(chan, skb);
2172 return len;
2173 }
2174
2175 switch (chan->mode) {
2176 case L2CAP_MODE_BASIC:
2177 /* Check outgoing MTU */
2178 if (len > chan->omtu)
2179 return -EMSGSIZE;
2180
2181 /* Create a basic PDU */
2182 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2183 if (IS_ERR(skb))
2184 return PTR_ERR(skb);
2185
2186 l2cap_do_send(chan, skb);
2187 err = len;
2188 break;
2189
2190 case L2CAP_MODE_ERTM:
2191 case L2CAP_MODE_STREAMING:
2192 /* Check outgoing MTU */
2193 if (len > chan->omtu) {
2194 err = -EMSGSIZE;
2195 break;
2196 }
2197
2198 __skb_queue_head_init(&seg_queue);
2199
2200 /* Do segmentation before calling in to the state machine,
2201 * since it's possible to block while waiting for memory
2202 * allocation.
2203 */
2204 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2205
2206 /* The channel could have been closed while segmenting,
2207 * check that it is still connected.
2208 */
2209 if (chan->state != BT_CONNECTED) {
2210 __skb_queue_purge(&seg_queue);
2211 err = -ENOTCONN;
2212 }
2213
2214 if (err)
2215 break;
2216
2217 if (chan->mode == L2CAP_MODE_ERTM)
2218 err = l2cap_tx(chan, 0, &seg_queue,
2219 L2CAP_EV_DATA_REQUEST);
2220 else
2221 err = l2cap_streaming_send(chan, &seg_queue);
2222
2223 if (!err)
2224 err = len;
2225
2226 /* If the skbs were not queued for sending, they'll still be in
2227 * seg_queue and need to be purged.
2228 */
2229 __skb_queue_purge(&seg_queue);
2230 break;
2231
2232 default:
2233 BT_DBG("bad state %1.1x", chan->mode);
2234 err = -EBADFD;
2235 }
2236
2237 return err;
2238}
2239
/* A frame arrived out of order: send one SREJ S-frame for every missing
 * sequence number between expected_tx_seq and @txseq (skipping any
 * already buffered in srej_q), remember each in srej_list, and advance
 * expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Don't re-request frames we already hold out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2262
2263static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2264{
2265 struct l2cap_ctrl control;
2266
2267 BT_DBG("chan %p", chan);
2268
2269 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2270 return;
2271
2272 memset(&control, 0, sizeof(control));
2273 control.sframe = 1;
2274 control.super = L2CAP_SUPER_SREJ;
2275 control.reqseq = chan->srej_list.tail;
2276 l2cap_send_sframe(chan, &control);
2277}
2278
/* Re-send SREJs for everything on srej_list except @txseq. Each entry
 * is popped, sent, and appended again, so the list is rotated; the
 * captured initial head bounds the walk to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the received seq (it no longer needs an SREJ)
		 * or when the list runs empty.
		 */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2304
/* Process an incoming ReqSeq: everything before @reqseq has been acked
 * by the remote, so drop those frames from tx_q, update the unacked
 * counters, and stop the retransmission timer if nothing is pending.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
2336
/* Abandon the SREJ_SENT receive state: forget which frames were being
 * re-requested, drop any frames buffered out of order, and fall back to
 * plain RECV (the remote will retransmit from buffer_seq).
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2346
/* ERTM transmit state machine, XMIT state: handle @event, optionally
 * consuming new data from @skbs. Events that poll the remote move the
 * machine to WAIT_F. Always returns 0 (err is kept for symmetry with
 * the other state handlers).
 */
static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends an RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy; poll with RR to
			 * resynchronize, then wait for the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2422
2423static int l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2424 struct l2cap_ctrl *control,
2425 struct sk_buff_head *skbs, u8 event)
2426{
2427 int err = 0;
2428
2429 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2430 event);
2431
2432 switch (event) {
2433 case L2CAP_EV_DATA_REQUEST:
2434 if (chan->tx_send_head == NULL)
2435 chan->tx_send_head = skb_peek(skbs);
2436 /* Queue data, but don't send. */
2437 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2438 break;
2439 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2440 BT_DBG("Enter LOCAL_BUSY");
2441 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2442
2443 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2444 /* The SREJ_SENT state must be aborted if we are to
2445 * enter the LOCAL_BUSY state.
2446 */
2447 l2cap_abort_rx_srej_sent(chan);
2448 }
2449
2450 l2cap_send_ack(chan);
2451
2452 break;
2453 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2454 BT_DBG("Exit LOCAL_BUSY");
2455 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2456
2457 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2458 struct l2cap_ctrl local_control;
2459 memset(&local_control, 0, sizeof(local_control));
2460 local_control.sframe = 1;
2461 local_control.super = L2CAP_SUPER_RR;
2462 local_control.poll = 1;
2463 local_control.reqseq = chan->buffer_seq;
2464 l2cap_send_sframe(chan, &local_control);
2465
2466 chan->retry_count = 1;
2467 __set_monitor_timer(chan);
2468 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2469 }
2470 break;
2471 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2472 l2cap_process_reqseq(chan, control->reqseq);
2473
2474 /* Fall through */
2475
2476 case L2CAP_EV_RECV_FBIT:
2477 if (control && control->final) {
2478 __clear_monitor_timer(chan);
2479 if (chan->unacked_frames > 0)
2480 __set_retrans_timer(chan);
2481 chan->retry_count = 0;
2482 chan->tx_state = L2CAP_TX_STATE_XMIT;
2483 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2484 }
2485 break;
2486 case L2CAP_EV_EXPLICIT_POLL:
2487 /* Ignore */
2488 break;
2489 case L2CAP_EV_MONITOR_TO:
2490 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2491 l2cap_send_rr_or_rnr(chan, 1);
2492 __set_monitor_timer(chan);
2493 chan->retry_count++;
2494 } else {
2495 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2496 }
2497 break;
2498 default:
2499 break;
2500 }
2501
2502 return err;
2503}
2504
2505static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2506 struct sk_buff_head *skbs, u8 event)
2507{
2508 int err = 0;
2509
2510 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2511 chan, control, skbs, event, chan->tx_state);
2512
2513 switch (chan->tx_state) {
2514 case L2CAP_TX_STATE_XMIT:
2515 err = l2cap_tx_state_xmit(chan, control, skbs, event);
2516 break;
2517 case L2CAP_TX_STATE_WAIT_F:
2518 err = l2cap_tx_state_wait_f(chan, control, skbs, event);
2519 break;
2520 default:
2521 /* Ignore event */
2522 break;
2523 }
2524
2525 return err;
2526}
2527
2528static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2529 struct l2cap_ctrl *control)
2530{
2531 BT_DBG("chan %p, control %p", chan, control);
2532 l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2533}
2534
2535/* Copy frame to all raw sockets on that connection */
2536static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2537{
2538 struct sk_buff *nskb;
2539 struct l2cap_chan *chan;
2540
2541 BT_DBG("conn %p", conn);
2542
2543 mutex_lock(&conn->chan_lock);
2544
2545 list_for_each_entry(chan, &conn->chan_l, list) {
2546 struct sock *sk = chan->sk;
2547 if (chan->chan_type != L2CAP_CHAN_RAW)
2548 continue;
2549
2550 /* Don't send frame to the socket it came from */
2551 if (skb->sk == sk)
2552 continue;
2553 nskb = skb_clone(skb, GFP_ATOMIC);
2554 if (!nskb)
2555 continue;
2556
2557 if (chan->ops->recv(chan->data, nskb))
2558 kfree_skb(nskb);
2559 }
2560
2561 mutex_unlock(&conn->chan_lock);
2562}
2563
2564/* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU (L2CAP header + command header +
 * payload).  If the command does not fit into a single skb of at most
 * conn->mtu bytes, the remainder is chained as continuation fragments
 * on frag_list.  Returns NULL on allocation failure; on success the
 * caller owns the returned skb.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; first skb carries at most one MTU of it */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits after the two headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb releases the chained fragments as well */
	kfree_skb(skb);
	return NULL;
}
2627
2628static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2629{
2630 struct l2cap_conf_opt *opt = *ptr;
2631 int len;
2632
2633 len = L2CAP_CONF_OPT_SIZE + opt->len;
2634 *ptr += len;
2635
2636 *type = opt->type;
2637 *olen = opt->len;
2638
2639 switch (opt->len) {
2640 case 1:
2641 *val = *((u8 *) opt->val);
2642 break;
2643
2644 case 2:
2645 *val = get_unaligned_le16(opt->val);
2646 break;
2647
2648 case 4:
2649 *val = get_unaligned_le32(opt->val);
2650 break;
2651
2652 default:
2653 *val = (unsigned long) opt->val;
2654 break;
2655 }
2656
2657 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2658 return len;
2659}
2660
2661static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2662{
2663 struct l2cap_conf_opt *opt = *ptr;
2664
2665 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2666
2667 opt->type = type;
2668 opt->len = len;
2669
2670 switch (len) {
2671 case 1:
2672 *((u8 *) opt->val) = val;
2673 break;
2674
2675 case 2:
2676 put_unaligned_le16(val, opt->val);
2677 break;
2678
2679 case 4:
2680 put_unaligned_le32(val, opt->val);
2681 break;
2682
2683 default:
2684 memcpy(opt->val, (void *) val, len);
2685 break;
2686 }
2687
2688 *ptr += L2CAP_CONF_OPT_SIZE + len;
2689}
2690
2691static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2692{
2693 struct l2cap_conf_efs efs;
2694
2695 switch (chan->mode) {
2696 case L2CAP_MODE_ERTM:
2697 efs.id = chan->local_id;
2698 efs.stype = chan->local_stype;
2699 efs.msdu = cpu_to_le16(chan->local_msdu);
2700 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2701 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2702 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2703 break;
2704
2705 case L2CAP_MODE_STREAMING:
2706 efs.id = 1;
2707 efs.stype = L2CAP_SERV_BESTEFFORT;
2708 efs.msdu = cpu_to_le16(chan->local_msdu);
2709 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2710 efs.acc_lat = 0;
2711 efs.flush_to = 0;
2712 break;
2713
2714 default:
2715 return;
2716 }
2717
2718 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2719 (unsigned long) &efs);
2720}
2721
/* Delayed-work handler for the ERTM ack timer: send any pending
 * acknowledgment for received frames.  Drops the channel reference
 * taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	/* Balance the hold taken by __set_ack_timer() */
	l2cap_chan_put(chan);
}
2737
/* Reset per-channel sequence state and queues for ERTM or streaming
 * mode.  Streaming mode needs only the sequence/tx-queue reset; ERTM
 * additionally sets up its state machines, timers and SREJ/retransmit
 * bookkeeping.  Returns 0 on success or a negative errno if the
 * sequence lists cannot be allocated.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs nothing beyond the resets above */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	/* Sequence lists are sized by the respective tx windows */
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so nothing leaks on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2777
2778static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2779{
2780 switch (mode) {
2781 case L2CAP_MODE_STREAMING:
2782 case L2CAP_MODE_ERTM:
2783 if (l2cap_mode_supported(mode, remote_feat_mask))
2784 return mode;
2785 /* fall through */
2786 default:
2787 return L2CAP_MODE_BASIC;
2788 }
2789}
2790
2791static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2792{
2793 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2794}
2795
2796static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2797{
2798 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2799}
2800
2801static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2802{
2803 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2804 __l2cap_ews_supported(chan)) {
2805 /* use extended control field */
2806 set_bit(FLAG_EXT_CTRL, &chan->flags);
2807 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2808 } else {
2809 chan->tx_win = min_t(u16, chan->tx_win,
2810 L2CAP_DEFAULT_TX_WINDOW);
2811 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2812 }
2813}
2814
/* Build an outgoing Configuration Request into @data for @chan,
 * advertising MTU, RFC (mode parameters), and optionally FCS, EFS and
 * extended-window options according to the negotiated mode and the
 * remote feature mask.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the first request/response
	 * exchange; later requests reuse the already-chosen mode.
	 */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated locally and
		 * must not be downgraded by feature negotiation.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only meaningful
		 * if the peer could have expected ERTM/streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* MPS must leave room for ERTM headers and FCS within
		 * the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Full (uncapped) tx window goes in the EWS option when
		 * extended control fields are in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2935
/* Parse the peer's accumulated Configuration Request (chan->conf_req,
 * chan->conf_len) and build our Configuration Response into @data.
 * Unknown non-hint options are echoed back with CONF_UNKNOWN; mode,
 * MTU, EFS and window parameters are validated and either accepted or
 * countered.  Returns the response length, or a negative errno when
 * the connection must be refused.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option in the request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires high-speed support */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown option types back to the peer */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only (re)negotiated on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Mandated mode must match what the peer requested */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode mismatch after a prior response means the peer
		 * will not converge; give up.
		 */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Incompatible service types cannot be served */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS the window came from the EWS option;
			 * the RFC field is then fixed to the default.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3155
/* Parse the peer's Configuration Response (@rsp, @len bytes) and build
 * a follow-up Configuration Request into @data, adopting the values
 * the peer proposed where acceptable.  *result may be downgraded to
 * UNACCEPT (e.g. MTU below minimum).  Returns the request length, or a
 * negative errno when the response is unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A locally mandated mode cannot be changed by
			 * the peer's counter-proposal.
			 */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Incompatible service types cannot be served */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode was requested but the peer wants something else */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3254
3255static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3256{
3257 struct l2cap_conf_rsp *rsp = data;
3258 void *ptr = rsp->data;
3259
3260 BT_DBG("chan %p", chan);
3261
3262 rsp->scid = cpu_to_le16(chan->dcid);
3263 rsp->result = cpu_to_le16(result);
3264 rsp->flags = cpu_to_le16(flags);
3265
3266 return ptr - data;
3267}
3268
/* Complete a deferred connection accept: send the success Connection
 * Response for the stored ident, then kick off configuration with our
 * first Configuration Request (unless one was already sent).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller proceeds to send the config request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3289
/* Extract the RFC option from a Configuration Response and apply its
 * timeouts/MPS to the channel.  If the peer omitted the RFC option,
 * sane defaults are substituted instead of leaving the channel with
 * uninitialized ERTM parameters.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM/streaming channels carry RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3333
3334static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3335{
3336 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3337
3338 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3339 return 0;
3340
3341 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3342 cmd->ident == conn->info_ident) {
3343 cancel_delayed_work(&conn->info_timer);
3344
3345 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3346 conn->info_ident = 0;
3347
3348 l2cap_conn_start(conn);
3349 }
3350
3351 return 0;
3352}
3353
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, run security and backlog checks, create the new child
 * channel, and reply with success/pending/refusal.  May also trigger
 * the feature-mask information exchange and the first Configuration
 * Request.  Always returns 0; errors are reported to the peer in the
 * response's result field.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace will accept/reject later */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet; answer pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature-mask information exchange */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3483
/* Handle a Connection Response to our earlier Connection Request.
 * On success move the channel to BT_CONFIG and send the first
 * Configuration Request; on pending just note the state; on any
 * failure tear the channel down with ECONNREFUSED.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending response may carry scid 0; then locate the channel
	 * by the command ident we used for the request.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configuration Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3551
3552static inline void set_default_fcs(struct l2cap_chan *chan)
3553{
3554 /* FCS is enabled only in ERTM or streaming mode, if one or both
3555 * sides request it.
3556 */
3557 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3558 chan->fcs = L2CAP_FCS_NONE;
3559 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3560 chan->fcs = L2CAP_FCS_CRC16;
3561}
3562
/* Handle an incoming Configuration Request.  Requests may arrive
 * split across multiple commands (continuation flag); fragments are
 * accumulated in chan->conf_req and parsed once complete.  Sends the
 * Configuration Response and, when both directions are configured,
 * finishes channel setup (FCS selection, ERTM init, BT_CONNECTED).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is only valid in these states; reject
		 * with INVALID_CID otherwise.
		 */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3672
/* Handle an incoming L2CAP Configuration Response on a BR/EDR link.
 *
 * Depending on the result code this either accepts the remote's answer,
 * renegotiates unacceptable options (bounded by L2CAP_CONF_MAX_CONF_RSP
 * attempts), tracks the PENDING handshake, or tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; released at "done". */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we also answered PENDING, finish our side now. */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options while under the attempt cap;
		 * once exhausted, fall through to the disconnect below.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

		/* fall through - too many rejections, give up */
	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments are coming. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3779
/* Handle an incoming L2CAP Disconnection Request: acknowledge it, shut
 * down the backing socket, and remove the channel.
 *
 * A temporary reference is held across l2cap_chan_del() so the ops->close
 * callback can run safely after the channel lock is dropped.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* Lookup by our local CID (the peer's dcid refers to our side). */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back, swapped into the peer's perspective. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep the channel alive until ops->close has run. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3825
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect request, so remove the channel and close it.
 *
 * As in l2cap_disconnect_req(), a temporary reference keeps the channel
 * valid for the ops->close callback after the lock is released.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep the channel alive until ops->close has run. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3859
/* Handle an incoming L2CAP Information Request.
 *
 * Supported queries are the feature mask and the fixed-channel map;
 * anything else is answered with L2CAP_IR_NOTSUPP.  The advertised
 * features depend on the disable_ertm and enable_hs module settings.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Header plus 32-bit feature mask payload. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Header plus the fixed channel bitmap. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* A2MP fixed channel is only advertised with high speed on. */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3909
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the connection-setup information exchange: a feature-mask
 * answer may trigger a follow-up fixed-channel query; once the exchange
 * is done (or fails), pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Remote refused or failed the query: mark the exchange done and
	 * proceed without the extra information.
	 */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, chase the bitmap next;
		 * otherwise the exchange is complete.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3967
3968static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3969 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3970 void *data)
3971{
3972 struct l2cap_create_chan_req *req = data;
3973 struct l2cap_create_chan_rsp rsp;
3974 u16 psm, scid;
3975
3976 if (cmd_len != sizeof(*req))
3977 return -EPROTO;
3978
3979 if (!enable_hs)
3980 return -EINVAL;
3981
3982 psm = le16_to_cpu(req->psm);
3983 scid = le16_to_cpu(req->scid);
3984
3985 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3986
3987 /* Placeholder: Always reject */
3988 rsp.dcid = 0;
3989 rsp.scid = cpu_to_le16(scid);
3990 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3991 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3992
3993 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3994 sizeof(rsp), &rsp);
3995
3996 return 0;
3997}
3998
/* Handle an incoming Create Channel Response (AMP).
 *
 * The response layout matches a Connection Response, so it is delegated
 * to the regular connect-response handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4006
4007static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4008 u16 icid, u16 result)
4009{
4010 struct l2cap_move_chan_rsp rsp;
4011
4012 BT_DBG("icid %d, result %d", icid, result);
4013
4014 rsp.icid = cpu_to_le16(icid);
4015 rsp.result = cpu_to_le16(result);
4016
4017 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4018}
4019
/* Send a Move Channel Confirmation for the given ICID.
 *
 * A fresh ident is allocated for this command; when a channel is given,
 * the ident is recorded in chan->ident so the matching confirmation
 * response can be correlated later.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
				struct l2cap_chan *chan, u16 icid, u16 result)
{
	struct l2cap_move_chan_cfm cfm;
	u8 ident;

	BT_DBG("icid %d, result %d", icid, result);

	ident = l2cap_get_ident(conn);
	if (chan)
		chan->ident = ident;

	cfm.icid = cpu_to_le16(icid);
	cfm.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
}
4037
4038static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4039 u16 icid)
4040{
4041 struct l2cap_move_chan_cfm_rsp rsp;
4042
4043 BT_DBG("icid %d", icid);
4044
4045 rsp.icid = cpu_to_le16(icid);
4046 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4047}
4048
4049static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4050 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4051{
4052 struct l2cap_move_chan_req *req = data;
4053 u16 icid = 0;
4054 u16 result = L2CAP_MR_NOT_ALLOWED;
4055
4056 if (cmd_len != sizeof(*req))
4057 return -EPROTO;
4058
4059 icid = le16_to_cpu(req->icid);
4060
4061 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4062
4063 if (!enable_hs)
4064 return -EINVAL;
4065
4066 /* Placeholder: Always refuse */
4067 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4068
4069 return 0;
4070}
4071
/* Handle an incoming Move Channel Response (AMP).
 *
 * Move handling is not implemented yet, so the result is ignored and an
 * UNCONFIRMED confirmation is always sent back.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", icid, result);

	/* Placeholder: Always unconfirmed */
	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);

	return 0;
}
4091
4092static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4093 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4094{
4095 struct l2cap_move_chan_cfm *cfm = data;
4096 u16 icid, result;
4097
4098 if (cmd_len != sizeof(*cfm))
4099 return -EPROTO;
4100
4101 icid = le16_to_cpu(cfm->icid);
4102 result = le16_to_cpu(cfm->result);
4103
4104 BT_DBG("icid %d, result %d", icid, result);
4105
4106 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4107
4108 return 0;
4109}
4110
4111static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4112 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4113{
4114 struct l2cap_move_chan_cfm_rsp *rsp = data;
4115 u16 icid;
4116
4117 if (cmd_len != sizeof(*rsp))
4118 return -EPROTO;
4119
4120 icid = le16_to_cpu(rsp->icid);
4121
4122 BT_DBG("icid %d", icid);
4123
4124 return 0;
4125}
4126
4127static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4128 u16 to_multiplier)
4129{
4130 u16 max_latency;
4131
4132 if (min > max || min < 6 || max > 3200)
4133 return -EINVAL;
4134
4135 if (to_multiplier < 10 || to_multiplier > 3200)
4136 return -EINVAL;
4137
4138 if (max >= to_multiplier * 8)
4139 return -EINVAL;
4140
4141 max_latency = (to_multiplier * 8 / max) - 1;
4142 if (latency > 499 || latency > max_latency)
4143 return -EINVAL;
4144
4145 return 0;
4146}
4147
/* Handle an incoming LE Connection Parameter Update Request.
 *
 * Only the master may receive this request.  The parameters are
 * validated with l2cap_check_conn_param(); a response is always sent,
 * and on acceptance the controller is asked to apply the new values.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master of the link processes update requests. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4189
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline; everything else is delegated.
 * Returns the handler's result, or -EINVAL for an unknown opcode (the
 * caller turns a non-zero return into a Command Reject).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4271
4272static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u8 *data)
4274{
4275 switch (cmd->code) {
4276 case L2CAP_COMMAND_REJ:
4277 return 0;
4278
4279 case L2CAP_CONN_PARAM_UPDATE_REQ:
4280 return l2cap_conn_param_update_req(conn, cmd, data);
4281
4282 case L2CAP_CONN_PARAM_UPDATE_RSP:
4283 return 0;
4284
4285 default:
4286 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4287 return -EINVAL;
4288 }
4289}
4290
/* Process an skb received on the L2CAP signaling channel.
 *
 * The skb may contain several concatenated commands; each is parsed and
 * dispatched to the LE or BR/EDR handler depending on link type.  A
 * handler error triggers a Not Understood command reject.  The skb is
 * consumed (freed) before returning.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	/* Walk the buffer command by command. */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Declared length overruns the buffer, or ident 0 (reserved):
		 * stop parsing this skb.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message is misleading - err here is
			 * whatever the handler returned, not only a link
			 * type mismatch.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4337
/* Verify (and strip) the frame check sequence of a received frame.
 *
 * When CRC16 FCS is in use, the skb is first trimmed so that skb->len no
 * longer counts the trailing FCS; the FCS bytes themselves are then read
 * from just past the new tail and compared against a CRC computed over
 * the L2CAP header (in front of skb->data) plus the remaining payload.
 *
 * Returns 0 on match or when FCS is disabled, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4358
/* Answer a poll (P-bit) by sending a final (F-bit) frame.
 *
 * If locally busy, an RNR carries the F-bit.  Otherwise pending I-frames
 * are flushed first; l2cap_send_sframe()/l2cap_ertm_send() clear
 * CONN_SEND_FBIT when they deliver the F-bit, so if it is still set
 * afterwards an explicit RR is sent to carry it.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy: restart retransmission timing. */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4392
/* Append new_frag to skb's fragment list and update the accounting.
 *
 * *last_frag is the caller-maintained tail pointer (initially the head
 * skb itself); it is advanced to new_frag.  skb's length fields are
 * updated so the head reflects the combined SDU size.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4411
/* Reassemble a (possibly segmented) SDU from a received I-frame.
 *
 * Segmentation state lives in chan->sdu / sdu_last_frag / sdu_len.
 * Ownership: when the skb is stored or delivered, the local pointer is
 * set to NULL; on any error path both the skb and any partial SDU are
 * freed and the segmentation state is reset.
 *
 * Returns 0 on success (frame consumed or buffered), negative error on
 * protocol violations (err starts at -EINVAL, so any "break" out of an
 * invalid-state check reports that).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while a partial SDU exists is invalid. */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start while one is in progress. */
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First segment already holds the whole SDU: invalid. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to chan->sdu. */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a started SDU is invalid. */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Running total must stay below the declared SDU length. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a started SDU is invalid. */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final total must match the declared SDU length exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe whether or
		 * not ownership was transferred above.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4493
4494void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4495{
4496 u8 event;
4497
4498 if (chan->mode != L2CAP_MODE_ERTM)
4499 return;
4500
4501 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4502 l2cap_tx(chan, 0, 0, event);
4503}
4504
/* Process I-frames buffered in chan->srej_q once gaps are filled.
 * Not implemented yet; currently always reports success.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
4510
/* Handle a received SREJ S-frame (selective retransmission request).
 * Not implemented yet.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	/* Placeholder */
}
4516
/* Handle a received REJ S-frame (go-back-N retransmission request).
 * Not implemented yet.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	/* Placeholder */
}
4522
/* Classify a received I-frame's tx sequence number relative to the
 * receive window and any outstanding SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the RX
 * state machines: expected, duplicate, unexpected (gap -> send SREJ),
 * the SREJ-specific variants, or invalid (possibly ignorable - see the
 * "double poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		/* While SREJs are outstanding, first rule out frames that
		 * fall outside the tx window entirely.
		 */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* The head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq sits between last_acked_seq and expected_tx_seq: we have
	 * already received this frame.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
						chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4609
/* ERTM receive state machine: normal RECV state.
 *
 * Handles incoming I-frames (in-sequence delivery, duplicate drop, and
 * transition to SREJ_SENT on a sequence gap) and supervisory frames
 * (RR/RNR/REJ/SREJ).  The skb is consumed here unless it was queued or
 * handed off (tracked by skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped;
			 * the peer will retransmit once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F-bit answers our outstanding REJ: resume
			 * retransmission and sending.
			 */
			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only the piggybacked ack info
			 * is of interest.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: remote is no longer busy; resume. */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote can't accept data; stop retransmitting. */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4744
/* ERTM receive state machine: SREJ_SENT state.
 *
 * Entered after a sequence gap was detected and SREJs were issued.
 * Incoming I-frames are buffered in chan->srej_q until the requested
 * retransmissions arrive; further gaps produce additional SREJs.
 * Supervisory frames are handled much like in the RECV state, except
 * that polls are answered with the SREJ tail.  The skb is consumed here
 * unless queued (tracked by skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for arrived; drop it
			 * from the SREJ list and try to flush the queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with our outstanding SREJ. */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4888
4889static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4890{
4891 /* Make sure reqseq is for a packet that has been sent but not acked */
4892 u16 unacked;
4893
4894 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4895 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4896}
4897
4898static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4899 struct sk_buff *skb, u8 event)
4900{
4901 int err = 0;
4902
4903 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4904 control, skb, event, chan->rx_state);
4905
4906 if (__valid_reqseq(chan, control->reqseq)) {
4907 switch (chan->rx_state) {
4908 case L2CAP_RX_STATE_RECV:
4909 err = l2cap_rx_state_recv(chan, control, skb, event);
4910 break;
4911 case L2CAP_RX_STATE_SREJ_SENT:
4912 err = l2cap_rx_state_srej_sent(chan, control, skb,
4913 event);
4914 break;
4915 default:
4916 /* shut it down */
4917 break;
4918 }
4919 } else {
4920 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4921 control->reqseq, chan->next_tx_seq,
4922 chan->expected_ack_seq);
4923 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4924 }
4925
4926 return err;
4927}
4928
/* Receive an I-frame on a streaming-mode channel.
 *
 * Streaming mode never retransmits: only the next expected txseq is
 * reassembled.  Any other sequence number discards both the partially
 * assembled SDU and the frame itself.  In either case the receive
 * window advances past the received txseq.
 *
 * Returns 0 (streaming mode reports no recoverable errors upward).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: throw away any partial SDU... */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		/* ...and the frame itself. */
		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Advance past this frame whether or not it was usable. */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
4966
/* Entry point for ERTM/streaming data frames on a channel.
 *
 * Unpacks the control field, verifies the FCS, computes the payload
 * length, validates F/P bits, and dispatches I-frames and S-frames
 * into the appropriate receive state machine.  @skb is consumed on
 * every path.
 *
 * Always returns 0; protocol errors are handled by dropping the
 * frame or requesting disconnection, not via the return value.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field and the FCS. */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* An oversized PDU is a protocol violation, not line noise. */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event. */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
5055
/* Route an incoming data frame to the channel identified by @cid.
 *
 * NOTE(review): the l2cap_chan_unlock() at "done" implies
 * l2cap_get_chan_by_scid() returns the channel locked — confirm
 * against that helper's definition.
 *
 * Always returns 0; undeliverable frames are freed here, while ERTM/
 * streaming frames are consumed by l2cap_data_rcv() on every path.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means the skb was consumed. */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
5105
5106static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
5107{
5108 struct l2cap_chan *chan;
5109
5110 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5111 if (!chan)
5112 goto drop;
5113
5114 BT_DBG("chan %p, len %d", chan, skb->len);
5115
5116 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5117 goto drop;
5118
5119 if (chan->imtu < skb->len)
5120 goto drop;
5121
5122 if (!chan->ops->recv(chan->data, skb))
5123 return 0;
5124
5125drop:
5126 kfree_skb(skb);
5127
5128 return 0;
5129}
5130
5131static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5132 struct sk_buff *skb)
5133{
5134 struct l2cap_chan *chan;
5135
5136 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5137 if (!chan)
5138 goto drop;
5139
5140 BT_DBG("chan %p, len %d", chan, skb->len);
5141
5142 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5143 goto drop;
5144
5145 if (chan->imtu < skb->len)
5146 goto drop;
5147
5148 if (!chan->ops->recv(chan->data, skb))
5149 return 0;
5150
5151drop:
5152 kfree_skb(skb);
5153
5154 return 0;
5155}
5156
/* Top-level demultiplexer for a complete L2CAP frame.
 *
 * Strips the basic header and dispatches on the destination CID to
 * the signaling, connectionless, ATT, SMP or data-channel handlers.
 * Ownership of @skb passes to the chosen handler (or it is freed
 * here on a length mismatch).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh stays valid across the pull: skb_pull only advances
	 * skb->data, it does not release the header bytes.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must describe exactly the remaining payload. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload. */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5200
5201/* ---- L2CAP interface with lower layer (HCI) ---- */
5202
/* HCI callback: an incoming ACL connection indication on @hdev.
 *
 * Scans the global channel list for listening channels bound either
 * to this adapter's own address (exact match) or to BDADDR_ANY, and
 * accumulates link-mode bits (accept, master) for each group.
 * Exact-address listeners take precedence over wildcard listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
5233
5234int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5235{
5236 struct l2cap_conn *conn;
5237
5238 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5239
5240 if (!status) {
5241 conn = l2cap_conn_add(hcon, status);
5242 if (conn)
5243 l2cap_conn_ready(conn);
5244 } else
5245 l2cap_conn_del(hcon, bt_to_errno(status));
5246
5247 return 0;
5248}
5249
5250int l2cap_disconn_ind(struct hci_conn *hcon)
5251{
5252 struct l2cap_conn *conn = hcon->l2cap_data;
5253
5254 BT_DBG("hcon %p", hcon);
5255
5256 if (!conn)
5257 return HCI_ERROR_REMOTE_USER_TERM;
5258 return conn->disc_reason;
5259}
5260
/* HCI callback: the ACL link went down; tear down the whole L2CAP
 * connection, translating the HCI reason code into an errno.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
5268
5269static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5270{
5271 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5272 return;
5273
5274 if (encrypt == 0x00) {
5275 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5276 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5277 } else if (chan->sec_level == BT_SECURITY_HIGH)
5278 l2cap_chan_close(chan, ECONNREFUSED);
5279 } else {
5280 if (chan->sec_level == BT_SECURITY_MEDIUM)
5281 __clear_chan_timer(chan);
5282 }
5283}
5284
/* HCI callback: authentication/encryption status changed on @hcon.
 *
 * Walks every channel on the connection and advances its state
 * machine according to the new security status: LE channels become
 * ready once encrypted, channels in BT_CONNECT send their pending
 * connection request, and channels in BT_CONNECT2 answer the peer's
 * pending connection request with success, pending-authorization,
 * or security-block.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* Encryption up on LE means SMP can distribute keys now. */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A connect request is already in flight for this channel. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Security change completed; let userspace resume. */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Wait for userspace authorization;
					 * wake the listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse the connection
				 * and schedule a disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5385
/* HCI callback: one ACL fragment arrived for @hcon.
 *
 * Reassembles L2CAP frames that span multiple ACL fragments: a start
 * fragment carries the basic L2CAP header announcing the total frame
 * length; continuation fragments (ACL_CONT) are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame().  The incoming @skb is always
 * consumed — either passed on whole, copied and freed, or freed on
 * error.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on this link creates the L2CAP connection. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is pending means
		 * the previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5477
/* seq_file show callback: print one line per global L2CAP channel —
 * source/destination address, state, PSM, CIDs, MTUs, security
 * level and mode.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   batostr(&bt_sk(sk)->src),
			   batostr(&bt_sk(sk)->dst),
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
5499
/* debugfs open callback: bind the seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5504
/* File operations for the "l2cap" debugfs entry (single-shot seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs entry, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
5513
5514int __init l2cap_init(void)
5515{
5516 int err;
5517
5518 err = l2cap_init_sockets();
5519 if (err < 0)
5520 return err;
5521
5522 if (bt_debugfs) {
5523 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5524 bt_debugfs, NULL, &l2cap_debugfs_fops);
5525 if (!l2cap_debugfs)
5526 BT_ERR("Failed to create L2CAP debug file");
5527 }
5528
5529 return 0;
5530}
5531
/* Module teardown: remove the debugfs entry and unregister sockets. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5537
/* Allow ERTM to be disabled at load time or via sysfs (mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");