net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32
33 #include "smp.h"
34 #include "a2mp.h"
35
36 struct sco_param {
37         u16 pkt_type;
38         u16 max_latency;
39         u8  retrans_effort;
40 };
41
42 static const struct sco_param esco_param_cvsd[] = {
43         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
44         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
45         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
46         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
47         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
48 };
49
50 static const struct sco_param sco_param_cvsd[] = {
51         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
52         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
53 };
54
55 static const struct sco_param esco_param_msbc[] = {
56         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
57         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
58 };
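/* The parameter tables above are walked by hci_setup_sync(): conn->attempt
 * is used as a 1-based index, so each retry of the synchronous connection
 * setup falls back to the next (more conservative) parameter set until the
 * table is exhausted and the setup is abandoned.
 */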
59
60 static void hci_le_create_connection_cancel(struct hci_conn *conn)
61 {
62         hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
63 }
64
65 static void hci_acl_create_connection(struct hci_conn *conn)
66 {
67         struct hci_dev *hdev = conn->hdev;
68         struct inquiry_entry *ie;
69         struct hci_cp_create_conn cp;
70
71         BT_DBG("hcon %p", conn);
72
73         conn->state = BT_CONNECT;
74         conn->out = true;
75         conn->role = HCI_ROLE_MASTER;
76
77         conn->attempt++;
78
79         conn->link_policy = hdev->link_policy;
80
81         memset(&cp, 0, sizeof(cp));
82         bacpy(&cp.bdaddr, &conn->dst);
83         cp.pscan_rep_mode = 0x02;
84
85         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
86         if (ie) {
87                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
88                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
89                         cp.pscan_mode     = ie->data.pscan_mode;
90                         cp.clock_offset   = ie->data.clock_offset |
91                                             cpu_to_le16(0x8000);
92                 }
93
94                 memcpy(conn->dev_class, ie->data.dev_class, 3);
95                 if (ie->data.ssp_mode > 0)
96                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
97         }
98
99         cp.pkt_type = cpu_to_le16(conn->pkt_type);
100         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
101                 cp.role_switch = 0x01;
102         else
103                 cp.role_switch = 0x00;
104
105         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
106 }
107
108 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
109 {
110         struct hci_cp_create_conn_cancel cp;
111
112         BT_DBG("hcon %p", conn);
113
114         if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
115                 return;
116
117         bacpy(&cp.bdaddr, &conn->dst);
118         hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
119 }
120
121 static void hci_reject_sco(struct hci_conn *conn)
122 {
123         struct hci_cp_reject_sync_conn_req cp;
124
125         cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
126         bacpy(&cp.bdaddr, &conn->dst);
127
128         hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
129 }
130
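/* Request disconnection of an established link.
 *
 * The connection state is moved to BT_DISCONN and an HCI_OP_DISCONNECT
 * command is sent; the connection object itself is cleaned up later from
 * the disconnect complete event handling. For ACL links where we are
 * master, a clock offset read is issued first so the result can be cached
 * by the event handling code.
 *
 * Illustrative call (hypothetical caller, error handling omitted):
 *
 *	hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
 */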
131 int hci_disconnect(struct hci_conn *conn, __u8 reason)
132 {
133         struct hci_cp_disconnect cp;
134
135         BT_DBG("hcon %p", conn);
136
137         /* When we are master of an established connection and it enters
138          * the disconnect timeout, then go ahead and try to read the
139          * current clock offset.  Processing of the result is done
140          * within the event handling and hci_clock_offset_evt function.
141          */
142         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
143                 struct hci_dev *hdev = conn->hdev;
144                 struct hci_cp_read_clock_offset clkoff_cp;
145
146                 clkoff_cp.handle = cpu_to_le16(conn->handle);
147                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
148                              &clkoff_cp);
149         }
150
151         conn->state = BT_DISCONN;
152
153         cp.handle = cpu_to_le16(conn->handle);
154         cp.reason = reason;
155         return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
156 }
157
158 static void hci_amp_disconn(struct hci_conn *conn)
159 {
160         struct hci_cp_disconn_phy_link cp;
161
162         BT_DBG("hcon %p", conn);
163
164         conn->state = BT_DISCONN;
165
166         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
167         cp.reason = hci_proto_disconn_ind(conn);
168         hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
169                      sizeof(cp), &cp);
170 }
171
172 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
173 {
174         struct hci_dev *hdev = conn->hdev;
175         struct hci_cp_add_sco cp;
176
177         BT_DBG("hcon %p", conn);
178
179         conn->state = BT_CONNECT;
180         conn->out = true;
181
182         conn->attempt++;
183
184         cp.handle   = cpu_to_le16(handle);
185         cp.pkt_type = cpu_to_le16(conn->pkt_type);
186
187         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
188 }
189
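/* Attempt to set up an eSCO/SCO link on top of an existing ACL connection.
 *
 * The transparent (mSBC) and CVSD parameter tables defined above are walked
 * one entry per attempt; false is returned once all parameter sets have been
 * tried (or the air mode is unsupported), which lets the caller fail the SCO
 * connection instead of retrying forever.
 */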
190 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
191 {
192         struct hci_dev *hdev = conn->hdev;
193         struct hci_cp_setup_sync_conn cp;
194         const struct sco_param *param;
195
196         BT_DBG("hcon %p", conn);
197
198         conn->state = BT_CONNECT;
199         conn->out = true;
200
201         conn->attempt++;
202
203         cp.handle   = cpu_to_le16(handle);
204
205         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
206         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
207         cp.voice_setting  = cpu_to_le16(conn->setting);
208
209         switch (conn->setting & SCO_AIRMODE_MASK) {
210         case SCO_AIRMODE_TRANSP:
211                 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
212                         return false;
213                 param = &esco_param_msbc[conn->attempt - 1];
214                 break;
215         case SCO_AIRMODE_CVSD:
216                 if (lmp_esco_capable(conn->link)) {
217                         if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
218                                 return false;
219                         param = &esco_param_cvsd[conn->attempt - 1];
220                 } else {
221                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
222                                 return false;
223                         param = &sco_param_cvsd[conn->attempt - 1];
224                 }
225                 break;
226         default:
227                 return false;
228         }
229
230         cp.retrans_effort = param->retrans_effort;
231         cp.pkt_type = __cpu_to_le16(param->pkt_type);
232         cp.max_latency = __cpu_to_le16(param->max_latency);
233
234         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
235                 return false;
236
237         return true;
238 }
239
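/* Update the connection parameters of an established LE link.
 *
 * If stored connection parameters exist for the peer they are refreshed as
 * well, and 0x01 is returned so the caller knows the new values were
 * remembered; otherwise 0x00 is returned. The actual change is requested
 * from the controller with HCI_OP_LE_CONN_UPDATE.
 */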
240 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
241                       u16 to_multiplier)
242 {
243         struct hci_dev *hdev = conn->hdev;
244         struct hci_conn_params *params;
245         struct hci_cp_le_conn_update cp;
246
247         hci_dev_lock(hdev);
248
249         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
250         if (params) {
251                 params->conn_min_interval = min;
252                 params->conn_max_interval = max;
253                 params->conn_latency = latency;
254                 params->supervision_timeout = to_multiplier;
255         }
256
257         hci_dev_unlock(hdev);
258
259         memset(&cp, 0, sizeof(cp));
260         cp.handle               = cpu_to_le16(conn->handle);
261         cp.conn_interval_min    = cpu_to_le16(min);
262         cp.conn_interval_max    = cpu_to_le16(max);
263         cp.conn_latency         = cpu_to_le16(latency);
264         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
265         cp.min_ce_len           = cpu_to_le16(0x0000);
266         cp.max_ce_len           = cpu_to_le16(0x0000);
267
268         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
269
270         if (params)
271                 return 0x01;
272
273         return 0x00;
274 }
275
276 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
277                       __u8 ltk[16])
278 {
279         struct hci_dev *hdev = conn->hdev;
280         struct hci_cp_le_start_enc cp;
281
282         BT_DBG("hcon %p", conn);
283
284         memset(&cp, 0, sizeof(cp));
285
286         cp.handle = cpu_to_le16(conn->handle);
287         cp.rand = rand;
288         cp.ediv = ediv;
289         memcpy(cp.ltk, ltk, sizeof(cp.ltk));
290
291         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
292 }
293
294 /* Device _must_ be locked */
295 void hci_sco_setup(struct hci_conn *conn, __u8 status)
296 {
297         struct hci_conn *sco = conn->link;
298
299         if (!sco)
300                 return;
301
302         BT_DBG("hcon %p", conn);
303
304         if (!status) {
305                 if (lmp_esco_capable(conn->hdev))
306                         hci_setup_sync(sco, conn->handle);
307                 else
308                         hci_add_sco(sco, conn->handle);
309         } else {
310                 hci_proto_connect_cfm(sco, status);
311                 hci_conn_del(sco);
312         }
313 }
314
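/* Deferred disconnect handling, scheduled via conn->disc_work.
 *
 * Depending on the connection state this either cancels a connection attempt
 * that is still in progress, rejects an incoming SCO request, or disconnects
 * an established link. The timeout action is only taken once the last
 * reference to the connection has been dropped.
 */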
315 static void hci_conn_timeout(struct work_struct *work)
316 {
317         struct hci_conn *conn = container_of(work, struct hci_conn,
318                                              disc_work.work);
319         int refcnt = atomic_read(&conn->refcnt);
320
321         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
322
323         WARN_ON(refcnt < 0);
324
325         /* FIXME: It was observed that in pairing failed scenario, refcnt
326          * drops below 0. Probably this is because l2cap_conn_del calls
327          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
328          * dropped. After that loop hci_chan_del is called which also drops
329          * conn. For now make sure that ACL is alive if refcnt is higher than 0,
330          * otherwise drop it.
331          */
332         if (refcnt > 0)
333                 return;
334
335         switch (conn->state) {
336         case BT_CONNECT:
337         case BT_CONNECT2:
338                 if (conn->out) {
339                         if (conn->type == ACL_LINK)
340                                 hci_acl_create_connection_cancel(conn);
341                         else if (conn->type == LE_LINK)
342                                 hci_le_create_connection_cancel(conn);
343                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
344                         hci_reject_sco(conn);
345                 }
346                 break;
347         case BT_CONFIG:
348         case BT_CONNECTED:
349                 if (conn->type == AMP_LINK) {
350                         hci_amp_disconn(conn);
351                 } else {
352                         __u8 reason = hci_proto_disconn_ind(conn);
353                         hci_disconnect(conn, reason);
354                 }
355                 break;
356         default:
357                 conn->state = BT_CLOSED;
358                 break;
359         }
360 }
361
362 /* Enter sniff mode */
363 static void hci_conn_idle(struct work_struct *work)
364 {
365         struct hci_conn *conn = container_of(work, struct hci_conn,
366                                              idle_work.work);
367         struct hci_dev *hdev = conn->hdev;
368
369         BT_DBG("hcon %p mode %d", conn, conn->mode);
370
371         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
372                 return;
373
374         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
375                 return;
376
377         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
378                 struct hci_cp_sniff_subrate cp;
379                 cp.handle             = cpu_to_le16(conn->handle);
380                 cp.max_latency        = cpu_to_le16(0);
381                 cp.min_remote_timeout = cpu_to_le16(0);
382                 cp.min_local_timeout  = cpu_to_le16(0);
383                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
384         }
385
386         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
387                 struct hci_cp_sniff_mode cp;
388                 cp.handle       = cpu_to_le16(conn->handle);
389                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
390                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
391                 cp.attempt      = cpu_to_le16(4);
392                 cp.timeout      = cpu_to_le16(1);
393                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
394         }
395 }
396
397 static void hci_conn_auto_accept(struct work_struct *work)
398 {
399         struct hci_conn *conn = container_of(work, struct hci_conn,
400                                              auto_accept_work.work);
401
402         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
403                      &conn->dst);
404 }
405
406 static void le_conn_timeout(struct work_struct *work)
407 {
408         struct hci_conn *conn = container_of(work, struct hci_conn,
409                                              le_conn_timeout.work);
410         struct hci_dev *hdev = conn->hdev;
411
412         BT_DBG("");
413
414         /* We could end up here due to having done directed advertising,
415          * so clean up the state if necessary. This should however only
416          * happen with broken hardware or if low duty cycle was used
417          * (which doesn't have a timeout of its own).
418          */
419         if (conn->role == HCI_ROLE_SLAVE) {
420                 u8 enable = 0x00;
421                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
422                              &enable);
423                 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
424                 return;
425         }
426
427         hci_le_create_connection_cancel(conn);
428 }
429
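/* Allocate and initialise a new hci_conn object and add it to the
 * connection hash of the given controller.
 *
 * The object starts in BT_OPEN with a reference count of zero; callers are
 * expected to take their own reference with hci_conn_hold() once they start
 * using the connection. Type specific defaults (packet types, LE identity
 * address) are filled in based on the link type.
 */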
430 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
431                               u8 role)
432 {
433         struct hci_conn *conn;
434
435         BT_DBG("%s dst %pMR", hdev->name, dst);
436
437         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
438         if (!conn)
439                 return NULL;
440
441         bacpy(&conn->dst, dst);
442         bacpy(&conn->src, &hdev->bdaddr);
443         conn->hdev  = hdev;
444         conn->type  = type;
445         conn->role  = role;
446         conn->mode  = HCI_CM_ACTIVE;
447         conn->state = BT_OPEN;
448         conn->auth_type = HCI_AT_GENERAL_BONDING;
449         conn->io_capability = hdev->io_capability;
450         conn->remote_auth = 0xff;
451         conn->key_type = 0xff;
452         conn->tx_power = HCI_TX_POWER_INVALID;
453         conn->max_tx_power = HCI_TX_POWER_INVALID;
454
455         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
456         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
457
458         if (conn->role == HCI_ROLE_MASTER)
459                 conn->out = true;
460
461         switch (type) {
462         case ACL_LINK:
463                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
464                 break;
465         case LE_LINK:
466                 /* conn->src should reflect the local identity address */
467                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
468                 break;
469         case SCO_LINK:
470                 if (lmp_esco_capable(hdev))
471                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
472                                         (hdev->esco_type & EDR_ESCO_MASK);
473                 else
474                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
475                 break;
476         case ESCO_LINK:
477                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
478                 break;
479         }
480
481         skb_queue_head_init(&conn->data_q);
482
483         INIT_LIST_HEAD(&conn->chan_list);
484
485         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
486         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
487         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
488         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
489
490         atomic_set(&conn->refcnt, 0);
491
492         hci_dev_hold(hdev);
493
494         hci_conn_hash_add(hdev, conn);
495         if (hdev->notify)
496                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
497
498         hci_conn_init_sysfs(conn);
499
500         return conn;
501 }
502
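/* Tear down an hci_conn object: pending work is cancelled, any linked
 * SCO/ACL connection is unlinked, unacknowledged packet counts are returned
 * to the controller counters, and the connection is removed from the hash
 * and sysfs before its reference is put.
 */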
503 int hci_conn_del(struct hci_conn *conn)
504 {
505         struct hci_dev *hdev = conn->hdev;
506
507         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
508
509         cancel_delayed_work_sync(&conn->disc_work);
510         cancel_delayed_work_sync(&conn->auto_accept_work);
511         cancel_delayed_work_sync(&conn->idle_work);
512
513         if (conn->type == ACL_LINK) {
514                 struct hci_conn *sco = conn->link;
515                 if (sco)
516                         sco->link = NULL;
517
518                 /* Unacked frames */
519                 hdev->acl_cnt += conn->sent;
520         } else if (conn->type == LE_LINK) {
521                 cancel_delayed_work(&conn->le_conn_timeout);
522
523                 if (hdev->le_pkts)
524                         hdev->le_cnt += conn->sent;
525                 else
526                         hdev->acl_cnt += conn->sent;
527         } else {
528                 struct hci_conn *acl = conn->link;
529                 if (acl) {
530                         acl->link = NULL;
531                         hci_conn_drop(acl);
532                 }
533         }
534
535         hci_chan_list_flush(conn);
536
537         if (conn->amp_mgr)
538                 amp_mgr_put(conn->amp_mgr);
539
540         hci_conn_hash_del(hdev, conn);
541         if (hdev->notify)
542                 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
543
544         skb_queue_purge(&conn->data_q);
545
546         hci_conn_del_sysfs(conn);
547
548         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
549                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
550
551         hci_dev_put(hdev);
552
553         hci_conn_put(conn);
554
555         return 0;
556 }
557
558 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
559 {
560         int use_src = bacmp(src, BDADDR_ANY);
561         struct hci_dev *hdev = NULL, *d;
562
563         BT_DBG("%pMR -> %pMR", src, dst);
564
565         read_lock(&hci_dev_list_lock);
566
567         list_for_each_entry(d, &hci_dev_list, list) {
568                 if (!test_bit(HCI_UP, &d->flags) ||
569                     test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
570                     d->dev_type != HCI_BREDR)
571                         continue;
572
573                 /* Simple routing:
574                  *   No source address - find interface with bdaddr != dst
575                  *   Source address    - find interface with bdaddr == src
576                  */
577
578                 if (use_src) {
579                         if (!bacmp(&d->bdaddr, src)) {
580                                 hdev = d; break;
581                         }
582                 } else {
583                         if (bacmp(&d->bdaddr, dst)) {
584                                 hdev = d; break;
585                         }
586                 }
587         }
588
589         if (hdev)
590                 hdev = hci_dev_hold(hdev);
591
592         read_unlock(&hci_dev_list_lock);
593         return hdev;
594 }
595 EXPORT_SYMBOL(hci_get_route);
596
597 /* This function requires the caller holds hdev->lock */
598 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
599 {
600         struct hci_dev *hdev = conn->hdev;
601         struct hci_conn_params *params;
602
603         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
604                                            conn->dst_type);
605         if (params && params->conn) {
606                 hci_conn_drop(params->conn);
607                 hci_conn_put(params->conn);
608                 params->conn = NULL;
609         }
610
611         conn->state = BT_CLOSED;
612
613         mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
614                             status);
615
616         hci_proto_connect_cfm(conn, status);
617
618         hci_conn_del(conn);
619
620         /* Since we may have temporarily stopped the background scanning in
621          * favor of connection establishment, we should restart it.
622          */
623         hci_update_background_scan(hdev);
624
625         /* Re-enable advertising in case this was a failed connection
626          * attempt as a peripheral.
627          */
628         mgmt_reenable_advertising(hdev);
629 }
630
631 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
632 {
633         struct hci_conn *conn;
634
635         if (status == 0)
636                 return;
637
638         BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
639                status);
640
641         hci_dev_lock(hdev);
642
643         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
644         if (!conn)
645                 goto done;
646
647         hci_le_conn_failed(conn, status);
648
649 done:
650         hci_dev_unlock(hdev);
651 }
652
653 static void hci_req_add_le_create_conn(struct hci_request *req,
654                                        struct hci_conn *conn)
655 {
656         struct hci_cp_le_create_conn cp;
657         struct hci_dev *hdev = conn->hdev;
658         u8 own_addr_type;
659
660         memset(&cp, 0, sizeof(cp));
661
662         /* Update random address, but set require_privacy to false so
663          * that we never connect with an unresolvable address.
664          */
665         if (hci_update_random_address(req, false, &own_addr_type))
666                 return;
667
668         cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
669         cp.scan_window = cpu_to_le16(hdev->le_scan_window);
670         bacpy(&cp.peer_addr, &conn->dst);
671         cp.peer_addr_type = conn->dst_type;
672         cp.own_address_type = own_addr_type;
673         cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
674         cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
675         cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
676         cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
677         cp.min_ce_len = cpu_to_le16(0x0000);
678         cp.max_ce_len = cpu_to_le16(0x0000);
679
680         hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
681
682         conn->state = BT_CONNECT;
683 }
684
685 static void hci_req_directed_advertising(struct hci_request *req,
686                                          struct hci_conn *conn)
687 {
688         struct hci_dev *hdev = req->hdev;
689         struct hci_cp_le_set_adv_param cp;
690         u8 own_addr_type;
691         u8 enable;
692
693         /* Clear the HCI_LE_ADV bit temporarily so that
694          * hci_update_random_address knows that it's safe to go ahead
695          * and write a new random address. The flag will be set back on
696          * as soon as the SET_ADV_ENABLE HCI command completes.
697          */
698         clear_bit(HCI_LE_ADV, &hdev->dev_flags);
699
700         /* Set require_privacy to false so that the remote device has a
701          * chance of identifying us.
702          */
703         if (hci_update_random_address(req, false, &own_addr_type) < 0)
704                 return;
705
706         memset(&cp, 0, sizeof(cp));
707         cp.type = LE_ADV_DIRECT_IND;
708         cp.own_address_type = own_addr_type;
709         cp.direct_addr_type = conn->dst_type;
710         bacpy(&cp.direct_addr, &conn->dst);
711         cp.channel_map = hdev->le_adv_channel_map;
712
713         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
714
715         enable = 0x01;
716         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
717
718         conn->state = BT_CONNECT;
719 }
720
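/* Initiate an outgoing LE connection (or reuse a pending one).
 *
 * Depending on the requested role this either starts directed advertising
 * (slave) or queues an HCI_OP_LE_CREATE_CONN request (master), translating
 * the identity address to the cached RPA when an IRK is known. On success a
 * held hci_conn is returned; callers release it with hci_conn_drop().
 *
 * Illustrative call (hypothetical caller, values for illustration only):
 *
 *	conn = hci_connect_le(hdev, dst, ADDR_LE_DEV_PUBLIC, BT_SECURITY_MEDIUM,
 *			      HCI_LE_CONN_TIMEOUT, HCI_ROLE_MASTER);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */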
721 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
722                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
723                                 u8 role)
724 {
725         struct hci_conn_params *params;
726         struct hci_conn *conn;
727         struct smp_irk *irk;
728         struct hci_request req;
729         int err;
730
731         /* Some devices send ATT messages as soon as the physical link is
732          * established. To be able to handle these ATT messages, user space
733          * first establishes the connection and then starts the pairing
734          * process.
735          *
736          * So if a hci_conn object already exists for the following connection
737          * attempt, we simply update the pending_sec_level field and return
738          * the object found.
739          */
740         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
741         if (conn) {
742                 conn->pending_sec_level = sec_level;
743                 goto done;
744         }
745
746         /* Since the controller supports only one LE connection attempt at a
747          * time, we return -EBUSY if there is any connection attempt running.
748          */
749         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
750         if (conn)
751                 return ERR_PTR(-EBUSY);
752
753         /* When given an identity address with existing identity
754          * resolving key, the connection needs to be established
755          * to a resolvable random address.
756          *
757          * This uses the cached random resolvable address from
758          * a previous scan. When no cached address is available,
759          * try connecting to the identity address instead.
760          *
761          * Storing the resolvable random address is required here
762          * to handle connection failures. The address will later
763          * be resolved back into the original identity address
764          * from the connect request.
765          */
766         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
767         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
768                 dst = &irk->rpa;
769                 dst_type = ADDR_LE_DEV_RANDOM;
770         }
771
772         conn = hci_conn_add(hdev, LE_LINK, dst, role);
773         if (!conn)
774                 return ERR_PTR(-ENOMEM);
775
776         conn->dst_type = dst_type;
777         conn->sec_level = BT_SECURITY_LOW;
778         conn->pending_sec_level = sec_level;
779         conn->conn_timeout = conn_timeout;
780
781         hci_req_init(&req, hdev);
782
783         /* Disable advertising if we're active. For master role
784          * connections most controllers will refuse to connect if
785          * advertising is enabled, and for slave role connections we
786          * have to disable it anyway in order to start directed
787          * advertising.
788          */
789         if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
790                 u8 enable = 0x00;
791                 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
792                             &enable);
793         }
794
795         /* If requested to connect as slave use directed advertising */
796         if (conn->role == HCI_ROLE_SLAVE) {
797                 /* If we're actively scanning, most controllers are unable
798                  * to initiate advertising. Simply reject the attempt.
799                  */
800                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
801                     hdev->le_scan_type == LE_SCAN_ACTIVE) {
802                         skb_queue_purge(&req.cmd_q);
803                         hci_conn_del(conn);
804                         return ERR_PTR(-EBUSY);
805                 }
806
807                 hci_req_directed_advertising(&req, conn);
808                 goto create_conn;
809         }
810
811         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
812         if (params) {
813                 conn->le_conn_min_interval = params->conn_min_interval;
814                 conn->le_conn_max_interval = params->conn_max_interval;
815                 conn->le_conn_latency = params->conn_latency;
816                 conn->le_supv_timeout = params->supervision_timeout;
817         } else {
818                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
819                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
820                 conn->le_conn_latency = hdev->le_conn_latency;
821                 conn->le_supv_timeout = hdev->le_supv_timeout;
822         }
823
824         /* If the controller is scanning, we stop it since some controllers are
825          * not able to scan and connect at the same time. Also set the
826          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
827          * handler for scan disabling knows to set the correct discovery
828          * state.
829          */
830         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
831                 hci_req_add_le_scan_disable(&req);
832                 set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
833         }
834
835         hci_req_add_le_create_conn(&req, conn);
836
837 create_conn:
838         err = hci_req_run(&req, create_le_conn_complete);
839         if (err) {
840                 hci_conn_del(conn);
841                 return ERR_PTR(err);
842         }
843
844 done:
845         hci_conn_hold(conn);
846         return conn;
847 }
848
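/* Set up an outgoing BR/EDR ACL connection, reusing an existing hci_conn
 * for the destination when one is already present. The returned connection
 * is held; paging is only (re)started when the link is not already being
 * established or connected.
 */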
849 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
850                                  u8 sec_level, u8 auth_type)
851 {
852         struct hci_conn *acl;
853
854         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
855                 return ERR_PTR(-EOPNOTSUPP);
856
857         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
858         if (!acl) {
859                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
860                 if (!acl)
861                         return ERR_PTR(-ENOMEM);
862         }
863
864         hci_conn_hold(acl);
865
866         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
867                 acl->sec_level = BT_SECURITY_LOW;
868                 acl->pending_sec_level = sec_level;
869                 acl->auth_type = auth_type;
870                 hci_acl_create_connection(acl);
871         }
872
873         return acl;
874 }
875
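/* Set up a SCO or eSCO connection on top of an ACL link to the same peer,
 * creating the ACL first if needed. When the ACL is still being set up, or a
 * mode change is pending, the synchronous setup is deferred and finished
 * later from hci_sco_setup().
 */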
876 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
877                                  __u16 setting)
878 {
879         struct hci_conn *acl;
880         struct hci_conn *sco;
881
882         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
883         if (IS_ERR(acl))
884                 return acl;
885
886         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
887         if (!sco) {
888                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
889                 if (!sco) {
890                         hci_conn_drop(acl);
891                         return ERR_PTR(-ENOMEM);
892                 }
893         }
894
895         acl->link = sco;
896         sco->link = acl;
897
898         hci_conn_hold(sco);
899
900         sco->setting = setting;
901
902         if (acl->state == BT_CONNECTED &&
903             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
904                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
905                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
906
907                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
908                         /* defer SCO setup until mode change completed */
909                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
910                         return sco;
911                 }
912
913                 hci_sco_setup(acl, 0x00);
914         }
915
916         return sco;
917 }
918
919 /* Check link security requirement */
920 int hci_conn_check_link_mode(struct hci_conn *conn)
921 {
922         BT_DBG("hcon %p", conn);
923
924         /* In Secure Connections Only mode, it is required that Secure
925          * Connections is used and the link is encrypted with AES-CCM
926          * using a P-256 authenticated combination key.
927          */
928         if (test_bit(HCI_SC_ONLY, &conn->hdev->dev_flags)) {
929                 if (!hci_conn_sc_enabled(conn) ||
930                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
931                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
932                         return 0;
933         }
934
935         if (hci_conn_ssp_enabled(conn) &&
936             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
937                 return 0;
938
939         return 1;
940 }
941
942 /* Authenticate remote device */
943 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
944 {
945         BT_DBG("hcon %p", conn);
946
947         if (conn->pending_sec_level > sec_level)
948                 sec_level = conn->pending_sec_level;
949
950         if (sec_level > conn->sec_level)
951                 conn->pending_sec_level = sec_level;
952         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
953                 return 1;
954
955         /* Make sure we preserve an existing MITM requirement */
956         auth_type |= (conn->auth_type & 0x01);
957
958         conn->auth_type = auth_type;
959
960         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
961                 struct hci_cp_auth_requested cp;
962
963                 cp.handle = cpu_to_le16(conn->handle);
964                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
965                              sizeof(cp), &cp);
966
967                 /* If we're already encrypted set the REAUTH_PEND flag,
968                  * otherwise set the ENCRYPT_PEND.
969                  */
970                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
971                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
972                 else
973                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
974         }
975
976         return 0;
977 }
978
979 /* Encrypt the link */
980 static void hci_conn_encrypt(struct hci_conn *conn)
981 {
982         BT_DBG("hcon %p", conn);
983
984         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
985                 struct hci_cp_set_conn_encrypt cp;
986                 cp.handle  = cpu_to_le16(conn->handle);
987                 cp.encrypt = 0x01;
988                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
989                              &cp);
990         }
991 }
992
993 /* Enable security */
994 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
995                       bool initiator)
996 {
997         BT_DBG("hcon %p", conn);
998
999         if (conn->type == LE_LINK)
1000                 return smp_conn_security(conn, sec_level);
1001
1002         /* For sdp we don't need the link key. */
1003         if (sec_level == BT_SECURITY_SDP)
1004                 return 1;
1005
1006         /* For non-2.1 devices and a low security level we don't need the link
1007            key. */
1008         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1009                 return 1;
1010
1011         /* For other security levels we need the link key. */
1012         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1013                 goto auth;
1014
1015         /* An authenticated FIPS approved combination key has sufficient
1016          * security for security level 4. */
1017         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1018             sec_level == BT_SECURITY_FIPS)
1019                 goto encrypt;
1020
1021         /* An authenticated combination key has sufficient security for
1022            security level 3. */
1023         if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1024              conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1025             sec_level == BT_SECURITY_HIGH)
1026                 goto encrypt;
1027
1028         /* An unauthenticated combination key has sufficient security for
1029            security level 1 and 2. */
1030         if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1031              conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1032             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1033                 goto encrypt;
1034
1035         /* For pre-2.1 units a combination key always has sufficient security
1036            for security levels 1 and 2. High security level additionally
1037            requires that the combination key was generated using the maximum
1038            PIN code length (16). */
1039         if (conn->key_type == HCI_LK_COMBINATION &&
1040             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1041              conn->pin_length == 16))
1042                 goto encrypt;
1043
1044 auth:
1045         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1046                 return 0;
1047
1048         if (initiator)
1049                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1050
1051         if (!hci_conn_auth(conn, sec_level, auth_type))
1052                 return 0;
1053
1054 encrypt:
1055         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1056                 return 1;
1057
1058         hci_conn_encrypt(conn);
1059         return 0;
1060 }
1061 EXPORT_SYMBOL(hci_conn_security);
1062
1063 /* Check secure link requirement */
1064 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1065 {
1066         BT_DBG("hcon %p", conn);
1067
1068         /* Accept if a security level below high/FIPS is required */
1069         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1070                 return 1;
1071
1072         /* Accept if secure or higher security level is already present */
1073         if (conn->sec_level == BT_SECURITY_HIGH ||
1074             conn->sec_level == BT_SECURITY_FIPS)
1075                 return 1;
1076
1077         /* Reject non-secure link */
1078         return 0;
1079 }
1080 EXPORT_SYMBOL(hci_conn_check_secure);
1081
1082 /* Change link key */
1083 int hci_conn_change_link_key(struct hci_conn *conn)
1084 {
1085         BT_DBG("hcon %p", conn);
1086
1087         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1088                 struct hci_cp_change_conn_link_key cp;
1089                 cp.handle = cpu_to_le16(conn->handle);
1090                 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1091                              sizeof(cp), &cp);
1092         }
1093
1094         return 0;
1095 }
1096
1097 /* Switch role */
1098 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1099 {
1100         BT_DBG("hcon %p", conn);
1101
1102         if (role == conn->role)
1103                 return 1;
1104
1105         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1106                 struct hci_cp_switch_role cp;
1107                 bacpy(&cp.bdaddr, &conn->dst);
1108                 cp.role = role;
1109                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1110         }
1111
1112         return 0;
1113 }
1114 EXPORT_SYMBOL(hci_conn_switch_role);
1115
1116 /* Enter active mode */
1117 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1118 {
1119         struct hci_dev *hdev = conn->hdev;
1120
1121         BT_DBG("hcon %p mode %d", conn, conn->mode);
1122
1123         if (conn->mode != HCI_CM_SNIFF)
1124                 goto timer;
1125
1126         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1127                 goto timer;
1128
1129         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1130                 struct hci_cp_exit_sniff_mode cp;
1131                 cp.handle = cpu_to_le16(conn->handle);
1132                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1133         }
1134
1135 timer:
1136         if (hdev->idle_timeout > 0)
1137                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1138                                    msecs_to_jiffies(hdev->idle_timeout));
1139 }
1140
1141 /* Drop all connections on the device */
1142 void hci_conn_hash_flush(struct hci_dev *hdev)
1143 {
1144         struct hci_conn_hash *h = &hdev->conn_hash;
1145         struct hci_conn *c, *n;
1146
1147         BT_DBG("hdev %s", hdev->name);
1148
1149         list_for_each_entry_safe(c, n, &h->list, list) {
1150                 c->state = BT_CLOSED;
1151
1152                 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1153                 hci_conn_del(c);
1154         }
1155 }
1156
1157 /* Check pending connect attempts */
1158 void hci_conn_check_pending(struct hci_dev *hdev)
1159 {
1160         struct hci_conn *conn;
1161
1162         BT_DBG("hdev %s", hdev->name);
1163
1164         hci_dev_lock(hdev);
1165
1166         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1167         if (conn)
1168                 hci_acl_create_connection(conn);
1169
1170         hci_dev_unlock(hdev);
1171 }
1172
1173 static u32 get_link_mode(struct hci_conn *conn)
1174 {
1175         u32 link_mode = 0;
1176
1177         if (conn->role == HCI_ROLE_MASTER)
1178                 link_mode |= HCI_LM_MASTER;
1179
1180         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1181                 link_mode |= HCI_LM_ENCRYPT;
1182
1183         if (test_bit(HCI_CONN_AUTH, &conn->flags))
1184                 link_mode |= HCI_LM_AUTH;
1185
1186         if (test_bit(HCI_CONN_SECURE, &conn->flags))
1187                 link_mode |= HCI_LM_SECURE;
1188
1189         if (test_bit(HCI_CONN_FIPS, &conn->flags))
1190                 link_mode |= HCI_LM_FIPS;
1191
1192         return link_mode;
1193 }
1194
1195 int hci_get_conn_list(void __user *arg)
1196 {
1197         struct hci_conn *c;
1198         struct hci_conn_list_req req, *cl;
1199         struct hci_conn_info *ci;
1200         struct hci_dev *hdev;
1201         int n = 0, size, err;
1202
1203         if (copy_from_user(&req, arg, sizeof(req)))
1204                 return -EFAULT;
1205
1206         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1207                 return -EINVAL;
1208
1209         size = sizeof(req) + req.conn_num * sizeof(*ci);
1210
1211         cl = kmalloc(size, GFP_KERNEL);
1212         if (!cl)
1213                 return -ENOMEM;
1214
1215         hdev = hci_dev_get(req.dev_id);
1216         if (!hdev) {
1217                 kfree(cl);
1218                 return -ENODEV;
1219         }
1220
1221         ci = cl->conn_info;
1222
1223         hci_dev_lock(hdev);
1224         list_for_each_entry(c, &hdev->conn_hash.list, list) {
1225                 bacpy(&(ci + n)->bdaddr, &c->dst);
1226                 (ci + n)->handle = c->handle;
1227                 (ci + n)->type  = c->type;
1228                 (ci + n)->out   = c->out;
1229                 (ci + n)->state = c->state;
1230                 (ci + n)->link_mode = get_link_mode(c);
1231                 if (++n >= req.conn_num)
1232                         break;
1233         }
1234         hci_dev_unlock(hdev);
1235
1236         cl->dev_id = hdev->id;
1237         cl->conn_num = n;
1238         size = sizeof(req) + n * sizeof(*ci);
1239
1240         hci_dev_put(hdev);
1241
1242         err = copy_to_user(arg, cl, size);
1243         kfree(cl);
1244
1245         return err ? -EFAULT : 0;
1246 }
1247
1248 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1249 {
1250         struct hci_conn_info_req req;
1251         struct hci_conn_info ci;
1252         struct hci_conn *conn;
1253         char __user *ptr = arg + sizeof(req);
1254
1255         if (copy_from_user(&req, arg, sizeof(req)))
1256                 return -EFAULT;
1257
1258         hci_dev_lock(hdev);
1259         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1260         if (conn) {
1261                 bacpy(&ci.bdaddr, &conn->dst);
1262                 ci.handle = conn->handle;
1263                 ci.type  = conn->type;
1264                 ci.out   = conn->out;
1265                 ci.state = conn->state;
1266                 ci.link_mode = get_link_mode(conn);
1267         }
1268         hci_dev_unlock(hdev);
1269
1270         if (!conn)
1271                 return -ENOENT;
1272
1273         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1274 }
1275
1276 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1277 {
1278         struct hci_auth_info_req req;
1279         struct hci_conn *conn;
1280
1281         if (copy_from_user(&req, arg, sizeof(req)))
1282                 return -EFAULT;
1283
1284         hci_dev_lock(hdev);
1285         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1286         if (conn)
1287                 req.type = conn->auth_type;
1288         hci_dev_unlock(hdev);
1289
1290         if (!conn)
1291                 return -ENOENT;
1292
1293         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1294 }
1295
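/* Create a new hci_chan for an existing connection (as used by L2CAP).
 * The channel takes its own reference on the hci_conn and creation is
 * refused once HCI_CONN_DROP has been set, i.e. after hci_chan_del() has
 * started tearing the connection down.
 */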
1296 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1297 {
1298         struct hci_dev *hdev = conn->hdev;
1299         struct hci_chan *chan;
1300
1301         BT_DBG("%s hcon %p", hdev->name, conn);
1302
1303         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1304                 BT_DBG("Refusing to create new hci_chan");
1305                 return NULL;
1306         }
1307
1308         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1309         if (!chan)
1310                 return NULL;
1311
1312         chan->conn = hci_conn_get(conn);
1313         skb_queue_head_init(&chan->data_q);
1314         chan->state = BT_CONNECTED;
1315
1316         list_add_rcu(&chan->list, &conn->chan_list);
1317
1318         return chan;
1319 }
1320
1321 void hci_chan_del(struct hci_chan *chan)
1322 {
1323         struct hci_conn *conn = chan->conn;
1324         struct hci_dev *hdev = conn->hdev;
1325
1326         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1327
1328         list_del_rcu(&chan->list);
1329
1330         synchronize_rcu();
1331
1332         /* Prevent new hci_chans from being created for this hci_conn */
1333         set_bit(HCI_CONN_DROP, &conn->flags);
1334
1335         hci_conn_put(conn);
1336
1337         skb_queue_purge(&chan->data_q);
1338         kfree(chan);
1339 }
1340
1341 void hci_chan_list_flush(struct hci_conn *conn)
1342 {
1343         struct hci_chan *chan, *n;
1344
1345         BT_DBG("hcon %p", conn);
1346
1347         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1348                 hci_chan_del(chan);
1349 }
1350
1351 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1352                                                  __u16 handle)
1353 {
1354         struct hci_chan *hchan;
1355
1356         list_for_each_entry(hchan, &hcon->chan_list, list) {
1357                 if (hchan->handle == handle)
1358                         return hchan;
1359         }
1360
1361         return NULL;
1362 }
1363
1364 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1365 {
1366         struct hci_conn_hash *h = &hdev->conn_hash;
1367         struct hci_conn *hcon;
1368         struct hci_chan *hchan = NULL;
1369
1370         rcu_read_lock();
1371
1372         list_for_each_entry_rcu(hcon, &h->list, list) {
1373                 hchan = __hci_chan_lookup_handle(hcon, handle);
1374                 if (hchan)
1375                         break;
1376         }
1377
1378         rcu_read_unlock();
1379
1380         return hchan;
1381 }