Bluetooth: Handle AD updating through an async request
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

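/* Synchronous requests are built on top of the asynchronous hci_request
 * API: the caller-supplied function queues one or more commands on the
 * request, hci_req_run() submits them with hci_req_sync_complete() as
 * the completion callback, and the caller sleeps on req_wait_q until the
 * request is marked HCI_REQ_DONE (or canceled) or the timeout expires.
 */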
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

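/* Stage-1 init: any driver-provided setup commands are flushed out
 * first through a separate request, then the controller is reset
 * (skipped for controllers with HCI_QUIRK_RESET_ON_CLOSE, which are
 * reset on close instead), followed by the type-specific basic reads
 * for BR/EDR or AMP controllers.
 */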
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

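/* Pick the best supported inquiry mode: 0x02 (extended inquiry result)
 * when extended inquiry is available, 0x01 (inquiry result with RSSI)
 * otherwise. The manufacturer/revision checks below appear to whitelist
 * controllers that report RSSI without advertising the corresponding
 * LMP feature.
 */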
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

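/* Build the HCI event mask from the controller's LMP features so that
 * only events the controller can actually generate are unmasked. For LE
 * capable controllers an LE event mask is set as well; 0x1f in its first
 * byte unmasks the first five LE meta events (per the 4.0 specification:
 * Connection Complete, Advertising Report, Connection Update Complete,
 * Read Remote Used Features Complete and Long Term Key Request).
 */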
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }
}

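/* Controller initialization runs as up to three synchronous request
 * stages: stage 1 does the reset and basic reads, stage 2 the
 * feature-dependent setup (event mask, SSP, EIR, inquiry mode) and
 * stage 3 the link policy plus LE host support and advertising data.
 */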
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

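/* Re-insert an entry into the name-resolve list, keeping the list
 * ordered by signal strength (strongest RSSI first) so that names get
 * resolved for the closest devices first. Entries with a name request
 * already pending stay at the front.
 */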
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses we use a buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so we allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

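/* Assemble the LE advertising data as a sequence of AD structures,
 * each consisting of a length byte, an AD type byte and the payload:
 * flags, TX power (when known) and the local name, which is shortened
 * if it doesn't fit into the remaining space.
 */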
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

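/* Queue an LE Set Advertising Data command on the given asynchronous
 * request, but only if the freshly generated data actually differs from
 * what was last sent to the controller. Being request-based, the AD
 * update can be batched with other commands, e.g. during init stage 3.
 */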
void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

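/* Common power-down path: cancel pending work and requests, flush the
 * RX/TX/command work, drain all queues, optionally reset the controller
 * (HCI_QUIRK_RESET_ON_CLOSE) and finally call the driver's close()
 * before clearing the non-persistent flags.
 */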
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

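/* Decide whether a new link key should be stored persistently. Roughly:
 * legacy keys are kept, debug keys never are, and for the remaining
 * types the decision follows the bonding requirements both sides
 * declared during pairing.
 */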
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side set no-bonding as a requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
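/* Fired when the controller has not answered the last sent command
 * within the command timeout. Resetting cmd_cnt to 1 unblocks the
 * command queue so that subsequent commands are not stuck behind the
 * lost one.
 */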
1694 static void hci_cmd_timeout(unsigned long arg)
1695 {
1696         struct hci_dev *hdev = (void *) arg;
1697
1698         if (hdev->sent_cmd) {
1699                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1700                 u16 opcode = __le16_to_cpu(sent->opcode);
1701
1702                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1703         } else {
1704                 BT_ERR("%s command tx timeout", hdev->name);
1705         }
1706
1707         atomic_set(&hdev->cmd_cnt, 1);
1708         queue_work(hdev->workqueue, &hdev->cmd_work);
1709 }
1710
1711 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1712                                           bdaddr_t *bdaddr)
1713 {
1714         struct oob_data *data;
1715
1716         list_for_each_entry(data, &hdev->remote_oob_data, list)
1717                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1718                         return data;
1719
1720         return NULL;
1721 }
1722
1723 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1724 {
1725         struct oob_data *data;
1726
1727         data = hci_find_remote_oob_data(hdev, bdaddr);
1728         if (!data)
1729                 return -ENOENT;
1730
1731         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1732
1733         list_del(&data->list);
1734         kfree(data);
1735
1736         return 0;
1737 }
1738
1739 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1740 {
1741         struct oob_data *data, *n;
1742
1743         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1744                 list_del(&data->list);
1745                 kfree(data);
1746         }
1747
1748         return 0;
1749 }
1750
1751 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1752                             u8 *randomizer)
1753 {
1754         struct oob_data *data;
1755
1756         data = hci_find_remote_oob_data(hdev, bdaddr);
1757
1758         if (!data) {
1759                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1760                 if (!data)
1761                         return -ENOMEM;
1762
1763                 bacpy(&data->bdaddr, bdaddr);
1764                 list_add(&data->list, &hdev->remote_oob_data);
1765         }
1766
1767         memcpy(data->hash, hash, sizeof(data->hash));
1768         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1769
1770         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1771
1772         return 0;
1773 }
1774
1775 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1776 {
1777         struct bdaddr_list *b;
1778
1779         list_for_each_entry(b, &hdev->blacklist, list)
1780                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1781                         return b;
1782
1783         return NULL;
1784 }
1785
1786 int hci_blacklist_clear(struct hci_dev *hdev)
1787 {
1788         struct list_head *p, *n;
1789
1790         list_for_each_safe(p, n, &hdev->blacklist) {
1791                 struct bdaddr_list *b;
1792
1793                 b = list_entry(p, struct bdaddr_list, list);
1794
1795                 list_del(p);
1796                 kfree(b);
1797         }
1798
1799         return 0;
1800 }
1801
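/* Add a device to the blacklist. BDADDR_ANY is rejected with -EBADF
 * and duplicate entries with -EEXIST; on success the management
 * interface is notified via mgmt_device_blocked(). Conversely,
 * hci_blacklist_del() treats BDADDR_ANY as "clear the whole list".
 */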
1802 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1803 {
1804         struct bdaddr_list *entry;
1805
1806         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1807                 return -EBADF;
1808
1809         if (hci_blacklist_lookup(hdev, bdaddr))
1810                 return -EEXIST;
1811
1812         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1813         if (!entry)
1814                 return -ENOMEM;
1815
1816         bacpy(&entry->bdaddr, bdaddr);
1817
1818         list_add(&entry->list, &hdev->blacklist);
1819
1820         return mgmt_device_blocked(hdev, bdaddr, type);
1821 }
1822
1823 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1824 {
1825         struct bdaddr_list *entry;
1826
1827         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1828                 return hci_blacklist_clear(hdev);
1829
1830         entry = hci_blacklist_lookup(hdev, bdaddr);
1831         if (!entry)
1832                 return -ENOENT;
1833
1834         list_del(&entry->list);
1835         kfree(entry);
1836
1837         return mgmt_device_unblocked(hdev, bdaddr, type);
1838 }
1839
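/* Request builders for LE scanning. These only queue HCI commands on
 * the request; they are run synchronously via __hci_req_sync(). The
 * interval and window values are passed through to the controller,
 * which interprets them in units of 0.625 ms per the Bluetooth Core
 * Specification.
 */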
1840 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1841 {
1842         struct le_scan_params *param = (struct le_scan_params *) opt;
1843         struct hci_cp_le_set_scan_param cp;
1844
1845         memset(&cp, 0, sizeof(cp));
1846         cp.type = param->type;
1847         cp.interval = cpu_to_le16(param->interval);
1848         cp.window = cpu_to_le16(param->window);
1849
1850         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1851 }
1852
1853 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1854 {
1855         struct hci_cp_le_set_scan_enable cp;
1856
1857         memset(&cp, 0, sizeof(cp));
1858         cp.enable = 1;
1859         cp.filter_dup = 1;
1860
1861         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1862 }
1863
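/* Start an LE scan: set the scan parameters, then enable scanning,
 * both as synchronous requests under the request lock. On success a
 * delayed work item is queued to disable the scan again once the
 * given timeout (in milliseconds) expires.
 */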
1864 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1865                           u16 window, int timeout)
1866 {
1867         long timeo = msecs_to_jiffies(3000);
1868         struct le_scan_params param;
1869         int err;
1870
1871         BT_DBG("%s", hdev->name);
1872
1873         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1874                 return -EINPROGRESS;
1875
1876         param.type = type;
1877         param.interval = interval;
1878         param.window = window;
1879
1880         hci_req_lock(hdev);
1881
1882         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1883                              timeo);
1884         if (!err)
1885                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1886
1887         hci_req_unlock(hdev);
1888
1889         if (err < 0)
1890                 return err;
1891
1892         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1893                            msecs_to_jiffies(timeout));
1894
1895         return 0;
1896 }
1897
1898 int hci_cancel_le_scan(struct hci_dev *hdev)
1899 {
1900         BT_DBG("%s", hdev->name);
1901
1902         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1903                 return -EALREADY;
1904
1905         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1906                 struct hci_cp_le_set_scan_enable cp;
1907
1908                 /* Send HCI command to disable LE Scan */
1909                 memset(&cp, 0, sizeof(cp));
1910                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1911         }
1912
1913         return 0;
1914 }
1915
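/* Delayed work scheduled by hci_do_le_scan() to stop scanning after
 * the requested duration. The scan-enable parameter block is all
 * zeroes, i.e. enable = 0, which tells the controller to stop
 * scanning.
 */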
1916 static void le_scan_disable_work(struct work_struct *work)
1917 {
1918         struct hci_dev *hdev = container_of(work, struct hci_dev,
1919                                             le_scan_disable.work);
1920         struct hci_cp_le_set_scan_enable cp;
1921
1922         BT_DBG("%s", hdev->name);
1923
1924         memset(&cp, 0, sizeof(cp));
1925
1926         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1927 }
1928
1929 static void le_scan_work(struct work_struct *work)
1930 {
1931         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1932         struct le_scan_params *param = &hdev->le_scan_params;
1933
1934         BT_DBG("%s", hdev->name);
1935
1936         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1937                        param->timeout);
1938 }
1939
1940 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1941                 int timeout)
1942 {
1943         struct le_scan_params *param = &hdev->le_scan_params;
1944
1945         BT_DBG("%s", hdev->name);
1946
1947         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1948                 return -ENOTSUPP;
1949
1950         if (work_busy(&hdev->le_scan))
1951                 return -EINPROGRESS;
1952
1953         param->type = type;
1954         param->interval = interval;
1955         param->window = window;
1956         param->timeout = timeout;
1957
1958         queue_work(system_long_wq, &hdev->le_scan);
1959
1960         return 0;
1961 }
1962
1963 /* Alloc HCI device */
1964 struct hci_dev *hci_alloc_dev(void)
1965 {
1966         struct hci_dev *hdev;
1967
1968         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1969         if (!hdev)
1970                 return NULL;
1971
1972         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1973         hdev->esco_type = (ESCO_HV1);
1974         hdev->link_mode = (HCI_LM_ACCEPT);
1975         hdev->io_capability = 0x03; /* No Input No Output */
1976         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1977         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1978
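        /* Default sniff mode interval limits, expressed in baseband
         * slots of 0.625 ms: 800 slots = 500 ms maximum, 80 slots =
         * 50 ms minimum.
         */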
1979         hdev->sniff_max_interval = 800;
1980         hdev->sniff_min_interval = 80;
1981
1982         mutex_init(&hdev->lock);
1983         mutex_init(&hdev->req_lock);
1984
1985         INIT_LIST_HEAD(&hdev->mgmt_pending);
1986         INIT_LIST_HEAD(&hdev->blacklist);
1987         INIT_LIST_HEAD(&hdev->uuids);
1988         INIT_LIST_HEAD(&hdev->link_keys);
1989         INIT_LIST_HEAD(&hdev->long_term_keys);
1990         INIT_LIST_HEAD(&hdev->remote_oob_data);
1991         INIT_LIST_HEAD(&hdev->conn_hash.list);
1992
1993         INIT_WORK(&hdev->rx_work, hci_rx_work);
1994         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1995         INIT_WORK(&hdev->tx_work, hci_tx_work);
1996         INIT_WORK(&hdev->power_on, hci_power_on);
1997         INIT_WORK(&hdev->le_scan, le_scan_work);
1998
1999         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2000         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2001         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2002
2003         skb_queue_head_init(&hdev->driver_init);
2004         skb_queue_head_init(&hdev->rx_q);
2005         skb_queue_head_init(&hdev->cmd_q);
2006         skb_queue_head_init(&hdev->raw_q);
2007
2008         init_waitqueue_head(&hdev->req_wait_q);
2009
2010         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2011
2012         hci_init_sysfs(hdev);
2013         discovery_init(hdev);
2014
2015         return hdev;
2016 }
2017 EXPORT_SYMBOL(hci_alloc_dev);
2018
2019 /* Free HCI device */
2020 void hci_free_dev(struct hci_dev *hdev)
2021 {
2022         skb_queue_purge(&hdev->driver_init);
2023
2024         /* Will be freed via the device release callback */
2025         put_device(&hdev->dev);
2026 }
2027 EXPORT_SYMBOL(hci_free_dev);
2028
2029 /* Register HCI device */
2030 int hci_register_dev(struct hci_dev *hdev)
2031 {
2032         int id, error;
2033
2034         if (!hdev->open || !hdev->close)
2035                 return -EINVAL;
2036
2037         /* Do not allow HCI_AMP devices to register at index 0,
2038          * so the index can be used as the AMP controller ID.
2039          */
2040         switch (hdev->dev_type) {
2041         case HCI_BREDR:
2042                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2043                 break;
2044         case HCI_AMP:
2045                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2046                 break;
2047         default:
2048                 return -EINVAL;
2049         }
2050
2051         if (id < 0)
2052                 return id;
2053
2054         sprintf(hdev->name, "hci%d", id);
2055         hdev->id = id;
2056
2057         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2058
2059         write_lock(&hci_dev_list_lock);
2060         list_add(&hdev->list, &hci_dev_list);
2061         write_unlock(&hci_dev_list_lock);
2062
2063         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2064                                           WQ_MEM_RECLAIM, 1);
2065         if (!hdev->workqueue) {
2066                 error = -ENOMEM;
2067                 goto err;
2068         }
2069
2070         hdev->req_workqueue = alloc_workqueue(hdev->name,
2071                                               WQ_HIGHPRI | WQ_UNBOUND |
2072                                               WQ_MEM_RECLAIM, 1);
2073         if (!hdev->req_workqueue) {
2074                 destroy_workqueue(hdev->workqueue);
2075                 error = -ENOMEM;
2076                 goto err;
2077         }
2078
2079         error = hci_add_sysfs(hdev);
2080         if (error < 0)
2081                 goto err_wqueue;
2082
2083         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2084                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2085                                     hdev);
2086         if (hdev->rfkill) {
2087                 if (rfkill_register(hdev->rfkill) < 0) {
2088                         rfkill_destroy(hdev->rfkill);
2089                         hdev->rfkill = NULL;
2090                 }
2091         }
2092
2093         set_bit(HCI_SETUP, &hdev->dev_flags);
2094
2095         if (hdev->dev_type != HCI_AMP)
2096                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2097
2098         hci_notify(hdev, HCI_DEV_REG);
2099         hci_dev_hold(hdev);
2100
2101         queue_work(hdev->req_workqueue, &hdev->power_on);
2102
2103         return id;
2104
2105 err_wqueue:
2106         destroy_workqueue(hdev->workqueue);
2107         destroy_workqueue(hdev->req_workqueue);
2108 err:
2109         ida_simple_remove(&hci_index_ida, hdev->id);
2110         write_lock(&hci_dev_list_lock);
2111         list_del(&hdev->list);
2112         write_unlock(&hci_dev_list_lock);
2113
2114         return error;
2115 }
2116 EXPORT_SYMBOL(hci_register_dev);
2117
2118 /* Unregister HCI device */
2119 void hci_unregister_dev(struct hci_dev *hdev)
2120 {
2121         int i, id;
2122
2123         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2124
2125         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2126
2127         id = hdev->id;
2128
2129         write_lock(&hci_dev_list_lock);
2130         list_del(&hdev->list);
2131         write_unlock(&hci_dev_list_lock);
2132
2133         hci_dev_do_close(hdev);
2134
2135         for (i = 0; i < NUM_REASSEMBLY; i++)
2136                 kfree_skb(hdev->reassembly[i]);
2137
2138         cancel_work_sync(&hdev->power_on);
2139
2140         if (!test_bit(HCI_INIT, &hdev->flags) &&
2141             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2142                 hci_dev_lock(hdev);
2143                 mgmt_index_removed(hdev);
2144                 hci_dev_unlock(hdev);
2145         }
2146
2147         /* mgmt_index_removed should take care of emptying the
2148          * pending list */
2149         BUG_ON(!list_empty(&hdev->mgmt_pending));
2150
2151         hci_notify(hdev, HCI_DEV_UNREG);
2152
2153         if (hdev->rfkill) {
2154                 rfkill_unregister(hdev->rfkill);
2155                 rfkill_destroy(hdev->rfkill);
2156         }
2157
2158         hci_del_sysfs(hdev);
2159
2160         destroy_workqueue(hdev->workqueue);
2161         destroy_workqueue(hdev->req_workqueue);
2162
2163         hci_dev_lock(hdev);
2164         hci_blacklist_clear(hdev);
2165         hci_uuids_clear(hdev);
2166         hci_link_keys_clear(hdev);
2167         hci_smp_ltks_clear(hdev);
2168         hci_remote_oob_data_clear(hdev);
2169         hci_dev_unlock(hdev);
2170
2171         hci_dev_put(hdev);
2172
2173         ida_simple_remove(&hci_index_ida, id);
2174 }
2175 EXPORT_SYMBOL(hci_unregister_dev);
2176
2177 /* Suspend HCI device */
2178 int hci_suspend_dev(struct hci_dev *hdev)
2179 {
2180         hci_notify(hdev, HCI_DEV_SUSPEND);
2181         return 0;
2182 }
2183 EXPORT_SYMBOL(hci_suspend_dev);
2184
2185 /* Resume HCI device */
2186 int hci_resume_dev(struct hci_dev *hdev)
2187 {
2188         hci_notify(hdev, HCI_DEV_RESUME);
2189         return 0;
2190 }
2191 EXPORT_SYMBOL(hci_resume_dev);
2192
2193 /* Receive frame from HCI drivers */
2194 int hci_recv_frame(struct sk_buff *skb)
2195 {
2196         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2197         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2198                       !test_bit(HCI_INIT, &hdev->flags))) {
2199                 kfree_skb(skb);
2200                 return -ENXIO;
2201         }
2202
2203         /* Incoming skb */
2204         bt_cb(skb)->incoming = 1;
2205
2206         /* Time stamp */
2207         __net_timestamp(skb);
2208
2209         skb_queue_tail(&hdev->rx_q, skb);
2210         queue_work(hdev->workqueue, &hdev->rx_work);
2211
2212         return 0;
2213 }
2214 EXPORT_SYMBOL(hci_recv_frame);
2215
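/* Reassemble one HCI packet from a driver-provided byte buffer. A
 * freshly allocated skb first expects just the packet header; once
 * the header is complete, 'expect' is reset to the payload length
 * it advertises. 'index' selects one of the hdev->reassembly slots.
 * Returns the number of input bytes left over once a complete frame
 * has been handed to hci_recv_frame(), or a negative error.
 */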
2216 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2217                           int count, __u8 index)
2218 {
2219         int len = 0;
2220         int hlen = 0;
2221         int remain = count;
2222         struct sk_buff *skb;
2223         struct bt_skb_cb *scb;
2224
2225         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2226             index >= NUM_REASSEMBLY)
2227                 return -EILSEQ;
2228
2229         skb = hdev->reassembly[index];
2230
2231         if (!skb) {
2232                 switch (type) {
2233                 case HCI_ACLDATA_PKT:
2234                         len = HCI_MAX_FRAME_SIZE;
2235                         hlen = HCI_ACL_HDR_SIZE;
2236                         break;
2237                 case HCI_EVENT_PKT:
2238                         len = HCI_MAX_EVENT_SIZE;
2239                         hlen = HCI_EVENT_HDR_SIZE;
2240                         break;
2241                 case HCI_SCODATA_PKT:
2242                         len = HCI_MAX_SCO_SIZE;
2243                         hlen = HCI_SCO_HDR_SIZE;
2244                         break;
2245                 }
2246
2247                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2248                 if (!skb)
2249                         return -ENOMEM;
2250
2251                 scb = (void *) skb->cb;
2252                 scb->expect = hlen;
2253                 scb->pkt_type = type;
2254
2255                 skb->dev = (void *) hdev;
2256                 hdev->reassembly[index] = skb;
2257         }
2258
2259         while (count) {
2260                 scb = (void *) skb->cb;
2261                 len = min_t(uint, scb->expect, count);
2262
2263                 memcpy(skb_put(skb, len), data, len);
2264
2265                 count -= len;
2266                 data += len;
2267                 scb->expect -= len;
2268                 remain = count;
2269
2270                 switch (type) {
2271                 case HCI_EVENT_PKT:
2272                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2273                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2274                                 scb->expect = h->plen;
2275
2276                                 if (skb_tailroom(skb) < scb->expect) {
2277                                         kfree_skb(skb);
2278                                         hdev->reassembly[index] = NULL;
2279                                         return -ENOMEM;
2280                                 }
2281                         }
2282                         break;
2283
2284                 case HCI_ACLDATA_PKT:
2285                         if (skb->len == HCI_ACL_HDR_SIZE) {
2286                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2287                                 scb->expect = __le16_to_cpu(h->dlen);
2288
2289                                 if (skb_tailroom(skb) < scb->expect) {
2290                                         kfree_skb(skb);
2291                                         hdev->reassembly[index] = NULL;
2292                                         return -ENOMEM;
2293                                 }
2294                         }
2295                         break;
2296
2297                 case HCI_SCODATA_PKT:
2298                         if (skb->len == HCI_SCO_HDR_SIZE) {
2299                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2300                                 scb->expect = h->dlen;
2301
2302                                 if (skb_tailroom(skb) < scb->expect) {
2303                                         kfree_skb(skb);
2304                                         hdev->reassembly[index] = NULL;
2305                                         return -ENOMEM;
2306                                 }
2307                         }
2308                         break;
2309                 }
2310
2311                 if (scb->expect == 0) {
2312                         /* Complete frame */
2313
2314                         bt_cb(skb)->pkt_type = type;
2315                         hci_recv_frame(skb);
2316
2317                         hdev->reassembly[index] = NULL;
2318                         return remain;
2319                 }
2320         }
2321
2322         return remain;
2323 }
2324
2325 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2326 {
2327         int rem = 0;
2328
2329         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2330                 return -EILSEQ;
2331
2332         while (count) {
2333                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2334                 if (rem < 0)
2335                         return rem;
2336
2337                 data += (count - rem);
2338                 count = rem;
2339         }
2340
2341         return rem;
2342 }
2343 EXPORT_SYMBOL(hci_recv_fragment);
2344
2345 #define STREAM_REASSEMBLY 0
2346
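/* Reassemble packets from a pure byte stream, as delivered by
 * UART-style (H:4) transports where the first byte of every frame
 * carries the packet type indicator. A single shared reassembly
 * slot (STREAM_REASSEMBLY) is used since the stream is sequential.
 */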
2347 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2348 {
2349         int type;
2350         int rem = 0;
2351
2352         while (count) {
2353                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2354
2355                 if (!skb) {
2356                         struct { char type; } *pkt;
2357
2358                         /* Start of the frame */
2359                         pkt = data;
2360                         type = pkt->type;
2361
2362                         data++;
2363                         count--;
2364                 } else
2365                         type = bt_cb(skb)->pkt_type;
2366
2367                 rem = hci_reassembly(hdev, type, data, count,
2368                                      STREAM_REASSEMBLY);
2369                 if (rem < 0)
2370                         return rem;
2371
2372                 data += (count - rem);
2373                 count = rem;
2374         }
2375
2376         return rem;
2377 }
2378 EXPORT_SYMBOL(hci_recv_stream_fragment);
2379
2380 /* ---- Interface to upper protocols ---- */
2381
2382 int hci_register_cb(struct hci_cb *cb)
2383 {
2384         BT_DBG("%p name %s", cb, cb->name);
2385
2386         write_lock(&hci_cb_list_lock);
2387         list_add(&cb->list, &hci_cb_list);
2388         write_unlock(&hci_cb_list_lock);
2389
2390         return 0;
2391 }
2392 EXPORT_SYMBOL(hci_register_cb);
2393
2394 int hci_unregister_cb(struct hci_cb *cb)
2395 {
2396         BT_DBG("%p name %s", cb, cb->name);
2397
2398         write_lock(&hci_cb_list_lock);
2399         list_del(&cb->list);
2400         write_unlock(&hci_cb_list_lock);
2401
2402         return 0;
2403 }
2404 EXPORT_SYMBOL(hci_unregister_cb);
2405
2406 static int hci_send_frame(struct sk_buff *skb)
2407 {
2408         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2409
2410         if (!hdev) {
2411                 kfree_skb(skb);
2412                 return -ENODEV;
2413         }
2414
2415         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2416
2417         /* Time stamp */
2418         __net_timestamp(skb);
2419
2420         /* Send copy to monitor */
2421         hci_send_to_monitor(hdev, skb);
2422
2423         if (atomic_read(&hdev->promisc)) {
2424                 /* Send copy to the sockets */
2425                 hci_send_to_sock(hdev, skb);
2426         }
2427
2428         /* Get rid of skb owner, prior to sending to the driver. */
2429         skb_orphan(skb);
2430
2431         return hdev->send(skb);
2432 }
2433
2434 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2435 {
2436         skb_queue_head_init(&req->cmd_q);
2437         req->hdev = hdev;
2438         req->err = 0;
2439 }
2440
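/* Run a built request: the completion callback is attached to the
 * control block of the last queued command, and all commands are
 * spliced atomically onto hdev->cmd_q before kicking cmd_work. An
 * empty request is rejected with -ENODATA, and a request that
 * failed during building is purged and its error returned.
 */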
2441 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2442 {
2443         struct hci_dev *hdev = req->hdev;
2444         struct sk_buff *skb;
2445         unsigned long flags;
2446
2447         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2448
2449         /* If an error occurred during request building, remove all HCI
2450          * commands queued on the HCI request queue.
2451          */
2452         if (req->err) {
2453                 skb_queue_purge(&req->cmd_q);
2454                 return req->err;
2455         }
2456
2457         /* Do not allow empty requests */
2458         if (skb_queue_empty(&req->cmd_q))
2459                 return -ENODATA;
2460
2461         skb = skb_peek_tail(&req->cmd_q);
2462         bt_cb(skb)->req.complete = complete;
2463
2464         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2465         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2466         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2467
2468         queue_work(hdev->workqueue, &hdev->cmd_work);
2469
2470         return 0;
2471 }
2472
2473 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2474                                        u32 plen, void *param)
2475 {
2476         int len = HCI_COMMAND_HDR_SIZE + plen;
2477         struct hci_command_hdr *hdr;
2478         struct sk_buff *skb;
2479
2480         skb = bt_skb_alloc(len, GFP_ATOMIC);
2481         if (!skb)
2482                 return NULL;
2483
2484         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2485         hdr->opcode = cpu_to_le16(opcode);
2486         hdr->plen   = plen;
2487
2488         if (plen)
2489                 memcpy(skb_put(skb, plen), param, plen);
2490
2491         BT_DBG("skb len %d", skb->len);
2492
2493         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2494         skb->dev = (void *) hdev;
2495
2496         return skb;
2497 }
2498
2499 /* Send HCI command */
2500 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2501 {
2502         struct sk_buff *skb;
2503
2504         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2505
2506         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2507         if (!skb) {
2508                 BT_ERR("%s no memory for command", hdev->name);
2509                 return -ENOMEM;
2510         }
2511
2512         /* Stand-alone HCI commands must be flagged as
2513          * single-command requests.
2514          */
2515         bt_cb(skb)->req.start = true;
2516
2517         skb_queue_tail(&hdev->cmd_q, skb);
2518         queue_work(hdev->workqueue, &hdev->cmd_work);
2519
2520         return 0;
2521 }
2522
2523 /* Queue a command to an asynchronous HCI request */
2524 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2525 {
2526         struct hci_dev *hdev = req->hdev;
2527         struct sk_buff *skb;
2528
2529         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2530
2531         /* If an error occurred during request building, there is no point in
2532          * queueing the HCI command. We can simply return.
2533          */
2534         if (req->err)
2535                 return;
2536
2537         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2538         if (!skb) {
2539                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2540                        hdev->name, opcode);
2541                 req->err = -ENOMEM;
2542                 return;
2543         }
2544
2545         if (skb_queue_empty(&req->cmd_q))
2546                 bt_cb(skb)->req.start = true;
2547
2548         skb_queue_tail(&req->cmd_q, skb);
2549 }
2550
2551 /* Get data from the previously sent command */
2552 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2553 {
2554         struct hci_command_hdr *hdr;
2555
2556         if (!hdev->sent_cmd)
2557                 return NULL;
2558
2559         hdr = (void *) hdev->sent_cmd->data;
2560
2561         if (hdr->opcode != cpu_to_le16(opcode))
2562                 return NULL;
2563
2564         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2565
2566         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2567 }
2568
2569 /* Send ACL data */
2570 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2571 {
2572         struct hci_acl_hdr *hdr;
2573         int len = skb->len;
2574
2575         skb_push(skb, HCI_ACL_HDR_SIZE);
2576         skb_reset_transport_header(skb);
2577         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2578         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2579         hdr->dlen   = cpu_to_le16(len);
2580 }
2581
2582 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2583                           struct sk_buff *skb, __u16 flags)
2584 {
2585         struct hci_conn *conn = chan->conn;
2586         struct hci_dev *hdev = conn->hdev;
2587         struct sk_buff *list;
2588
2589         skb->len = skb_headlen(skb);
2590         skb->data_len = 0;
2591
2592         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2593
2594         switch (hdev->dev_type) {
2595         case HCI_BREDR:
2596                 hci_add_acl_hdr(skb, conn->handle, flags);
2597                 break;
2598         case HCI_AMP:
2599                 hci_add_acl_hdr(skb, chan->handle, flags);
2600                 break;
2601         default:
2602                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2603                 return;
2604         }
2605
2606         list = skb_shinfo(skb)->frag_list;
2607         if (!list) {
2608                 /* Non-fragmented */
2609                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2610
2611                 skb_queue_tail(queue, skb);
2612         } else {
2613                 /* Fragmented */
2614                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2615
2616                 skb_shinfo(skb)->frag_list = NULL;
2617
2618                 /* Queue all fragments atomically */
2619                 spin_lock(&queue->lock);
2620
2621                 __skb_queue_tail(queue, skb);
2622
2623                 flags &= ~ACL_START;
2624                 flags |= ACL_CONT;
2625                 do {
2626                         skb = list; list = list->next;
2627
2628                         skb->dev = (void *) hdev;
2629                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2630                         hci_add_acl_hdr(skb, conn->handle, flags);
2631
2632                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2633
2634                         __skb_queue_tail(queue, skb);
2635                 } while (list);
2636
2637                 spin_unlock(&queue->lock);
2638         }
2639 }
2640
2641 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2642 {
2643         struct hci_dev *hdev = chan->conn->hdev;
2644
2645         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2646
2647         skb->dev = (void *) hdev;
2648
2649         hci_queue_acl(chan, &chan->data_q, skb, flags);
2650
2651         queue_work(hdev->workqueue, &hdev->tx_work);
2652 }
2653
2654 /* Send SCO data */
2655 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2656 {
2657         struct hci_dev *hdev = conn->hdev;
2658         struct hci_sco_hdr hdr;
2659
2660         BT_DBG("%s len %d", hdev->name, skb->len);
2661
2662         hdr.handle = cpu_to_le16(conn->handle);
2663         hdr.dlen   = skb->len;
2664
2665         skb_push(skb, HCI_SCO_HDR_SIZE);
2666         skb_reset_transport_header(skb);
2667         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2668
2669         skb->dev = (void *) hdev;
2670         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2671
2672         skb_queue_tail(&conn->data_q, skb);
2673         queue_work(hdev->workqueue, &hdev->tx_work);
2674 }
2675
2676 /* ---- HCI TX task (outgoing data) ---- */
2677
2678 /* HCI Connection scheduler */
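/* Pick the connection of the given link type with the least data
 * sent so far, and compute its fair share of the controller's free
 * buffers: quote = free buffers / active connections, with a floor
 * of one. For example, 10 free ACL buffers shared by 3 busy ACL
 * connections yields a quote of 3.
 */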
2679 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2680                                      int *quote)
2681 {
2682         struct hci_conn_hash *h = &hdev->conn_hash;
2683         struct hci_conn *conn = NULL, *c;
2684         unsigned int num = 0, min = ~0;
2685
2686         /* We don't have to lock the device here. Connections are always
2687          * added and removed with the TX task disabled. */
2688
2689         rcu_read_lock();
2690
2691         list_for_each_entry_rcu(c, &h->list, list) {
2692                 if (c->type != type || skb_queue_empty(&c->data_q))
2693                         continue;
2694
2695                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2696                         continue;
2697
2698                 num++;
2699
2700                 if (c->sent < min) {
2701                         min  = c->sent;
2702                         conn = c;
2703                 }
2704
2705                 if (hci_conn_num(hdev, type) == num)
2706                         break;
2707         }
2708
2709         rcu_read_unlock();
2710
2711         if (conn) {
2712                 int cnt, q;
2713
2714                 switch (conn->type) {
2715                 case ACL_LINK:
2716                         cnt = hdev->acl_cnt;
2717                         break;
2718                 case SCO_LINK:
2719                 case ESCO_LINK:
2720                         cnt = hdev->sco_cnt;
2721                         break;
2722                 case LE_LINK:
2723                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2724                         break;
2725                 default:
2726                         cnt = 0;
2727                         BT_ERR("Unknown link type");
2728                 }
2729
2730                 q = cnt / num;
2731                 *quote = q ? q : 1;
2732         } else
2733                 *quote = 0;
2734
2735         BT_DBG("conn %p quote %d", conn, *quote);
2736         return conn;
2737 }
2738
2739 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2740 {
2741         struct hci_conn_hash *h = &hdev->conn_hash;
2742         struct hci_conn *c;
2743
2744         BT_ERR("%s link tx timeout", hdev->name);
2745
2746         rcu_read_lock();
2747
2748         /* Kill stalled connections */
2749         list_for_each_entry_rcu(c, &h->list, list) {
2750                 if (c->type == type && c->sent) {
2751                         BT_ERR("%s killing stalled connection %pMR",
2752                                hdev->name, &c->dst);
2753                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2754                 }
2755         }
2756
2757         rcu_read_unlock();
2758 }
2759
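/* Channel scheduler: like hci_low_sent(), but operates on HCI
 * channels and is priority-aware. Only channels whose head-of-queue
 * skb has the highest priority seen so far are considered; among
 * those, the channel on the least-busy connection wins. The quote
 * is again the connection-fair share of free controller buffers.
 */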
2760 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2761                                       int *quote)
2762 {
2763         struct hci_conn_hash *h = &hdev->conn_hash;
2764         struct hci_chan *chan = NULL;
2765         unsigned int num = 0, min = ~0, cur_prio = 0;
2766         struct hci_conn *conn;
2767         int cnt, q, conn_num = 0;
2768
2769         BT_DBG("%s", hdev->name);
2770
2771         rcu_read_lock();
2772
2773         list_for_each_entry_rcu(conn, &h->list, list) {
2774                 struct hci_chan *tmp;
2775
2776                 if (conn->type != type)
2777                         continue;
2778
2779                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2780                         continue;
2781
2782                 conn_num++;
2783
2784                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2785                         struct sk_buff *skb;
2786
2787                         if (skb_queue_empty(&tmp->data_q))
2788                                 continue;
2789
2790                         skb = skb_peek(&tmp->data_q);
2791                         if (skb->priority < cur_prio)
2792                                 continue;
2793
2794                         if (skb->priority > cur_prio) {
2795                                 num = 0;
2796                                 min = ~0;
2797                                 cur_prio = skb->priority;
2798                         }
2799
2800                         num++;
2801
2802                         if (conn->sent < min) {
2803                                 min  = conn->sent;
2804                                 chan = tmp;
2805                         }
2806                 }
2807
2808                 if (hci_conn_num(hdev, type) == conn_num)
2809                         break;
2810         }
2811
2812         rcu_read_unlock();
2813
2814         if (!chan)
2815                 return NULL;
2816
2817         switch (chan->conn->type) {
2818         case ACL_LINK:
2819                 cnt = hdev->acl_cnt;
2820                 break;
2821         case AMP_LINK:
2822                 cnt = hdev->block_cnt;
2823                 break;
2824         case SCO_LINK:
2825         case ESCO_LINK:
2826                 cnt = hdev->sco_cnt;
2827                 break;
2828         case LE_LINK:
2829                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2830                 break;
2831         default:
2832                 cnt = 0;
2833                 BT_ERR("Unknown link type");
2834         }
2835
2836         q = cnt / num;
2837         *quote = q ? q : 1;
2838         BT_DBG("chan %p quote %d", chan, *quote);
2839         return chan;
2840 }
2841
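/* Anti-starvation pass, run after a scheduling round that consumed
 * buffers: channels that sent nothing get the priority of their
 * head-of-queue skb promoted to HCI_PRIO_MAX - 1 so they are not
 * starved by higher-priority traffic forever, while channels that
 * did send simply have their per-round 'sent' counter reset.
 */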
2842 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2843 {
2844         struct hci_conn_hash *h = &hdev->conn_hash;
2845         struct hci_conn *conn;
2846         int num = 0;
2847
2848         BT_DBG("%s", hdev->name);
2849
2850         rcu_read_lock();
2851
2852         list_for_each_entry_rcu(conn, &h->list, list) {
2853                 struct hci_chan *chan;
2854
2855                 if (conn->type != type)
2856                         continue;
2857
2858                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2859                         continue;
2860
2861                 num++;
2862
2863                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2864                         struct sk_buff *skb;
2865
2866                         if (chan->sent) {
2867                                 chan->sent = 0;
2868                                 continue;
2869                         }
2870
2871                         if (skb_queue_empty(&chan->data_q))
2872                                 continue;
2873
2874                         skb = skb_peek(&chan->data_q);
2875                         if (skb->priority >= HCI_PRIO_MAX - 1)
2876                                 continue;
2877
2878                         skb->priority = HCI_PRIO_MAX - 1;
2879
2880                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2881                                skb->priority);
2882                 }
2883
2884                 if (hci_conn_num(hdev, type) == num)
2885                         break;
2886         }
2887
2888         rcu_read_unlock();
2889
2890 }
2891
2892 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2893 {
2894         /* Calculate count of blocks used by this packet */
2895         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2896 }
2897
2898 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2899 {
2900         if (!test_bit(HCI_RAW, &hdev->flags)) {
2901                 /* ACL tx timeout must be longer than maximum
2902                  * link supervision timeout (40.9 seconds) */
2903                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2904                                        HCI_ACL_TX_TIMEOUT))
2905                         hci_link_tx_to(hdev, ACL_LINK);
2906         }
2907 }
2908
2909 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2910 {
2911         unsigned int cnt = hdev->acl_cnt;
2912         struct hci_chan *chan;
2913         struct sk_buff *skb;
2914         int quote;
2915
2916         __check_timeout(hdev, cnt);
2917
2918         while (hdev->acl_cnt &&
2919                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2920                 u32 priority = (skb_peek(&chan->data_q))->priority;
2921                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2922                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2923                                skb->len, skb->priority);
2924
2925                         /* Stop if priority has changed */
2926                         if (skb->priority < priority)
2927                                 break;
2928
2929                         skb = skb_dequeue(&chan->data_q);
2930
2931                         hci_conn_enter_active_mode(chan->conn,
2932                                                    bt_cb(skb)->force_active);
2933
2934                         hci_send_frame(skb);
2935                         hdev->acl_last_tx = jiffies;
2936
2937                         hdev->acl_cnt--;
2938                         chan->sent++;
2939                         chan->conn->sent++;
2940                 }
2941         }
2942
2943         if (cnt != hdev->acl_cnt)
2944                 hci_prio_recalculate(hdev, ACL_LINK);
2945 }
2946
2947 static void hci_sched_acl_blk(struct hci_dev *hdev)
2948 {
2949         unsigned int cnt = hdev->block_cnt;
2950         struct hci_chan *chan;
2951         struct sk_buff *skb;
2952         int quote;
2953         u8 type;
2954
2955         __check_timeout(hdev, cnt);
2956
2957         BT_DBG("%s", hdev->name);
2958
2959         if (hdev->dev_type == HCI_AMP)
2960                 type = AMP_LINK;
2961         else
2962                 type = ACL_LINK;
2963
2964         while (hdev->block_cnt > 0 &&
2965                (chan = hci_chan_sent(hdev, type, &quote))) {
2966                 u32 priority = (skb_peek(&chan->data_q))->priority;
2967                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2968                         int blocks;
2969
2970                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2971                                skb->len, skb->priority);
2972
2973                         /* Stop if priority has changed */
2974                         if (skb->priority < priority)
2975                                 break;
2976
2977                         skb = skb_dequeue(&chan->data_q);
2978
2979                         blocks = __get_blocks(hdev, skb);
2980                         if (blocks > hdev->block_cnt)
2981                                 return;
2982
2983                         hci_conn_enter_active_mode(chan->conn,
2984                                                    bt_cb(skb)->force_active);
2985
2986                         hci_send_frame(skb);
2987                         hdev->acl_last_tx = jiffies;
2988
2989                         hdev->block_cnt -= blocks;
2990                         quote -= blocks;
2991
2992                         chan->sent += blocks;
2993                         chan->conn->sent += blocks;
2994                 }
2995         }
2996
2997         if (cnt != hdev->block_cnt)
2998                 hci_prio_recalculate(hdev, type);
2999 }
3000
3001 static void hci_sched_acl(struct hci_dev *hdev)
3002 {
3003         BT_DBG("%s", hdev->name);
3004
3005         /* No ACL link over BR/EDR controller */
3006         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3007                 return;
3008
3009         /* No AMP link over AMP controller */
3010         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3011                 return;
3012
3013         switch (hdev->flow_ctl_mode) {
3014         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3015                 hci_sched_acl_pkt(hdev);
3016                 break;
3017
3018         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3019                 hci_sched_acl_blk(hdev);
3020                 break;
3021         }
3022 }
3023
3024 /* Schedule SCO */
3025 static void hci_sched_sco(struct hci_dev *hdev)
3026 {
3027         struct hci_conn *conn;
3028         struct sk_buff *skb;
3029         int quote;
3030
3031         BT_DBG("%s", hdev->name);
3032
3033         if (!hci_conn_num(hdev, SCO_LINK))
3034                 return;
3035
3036         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3037                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3038                         BT_DBG("skb %p len %d", skb, skb->len);
3039                         hci_send_frame(skb);
3040
3041                         conn->sent++;
3042                         if (conn->sent == ~0)
3043                                 conn->sent = 0;
3044                 }
3045         }
3046 }
3047
3048 static void hci_sched_esco(struct hci_dev *hdev)
3049 {
3050         struct hci_conn *conn;
3051         struct sk_buff *skb;
3052         int quote;
3053
3054         BT_DBG("%s", hdev->name);
3055
3056         if (!hci_conn_num(hdev, ESCO_LINK))
3057                 return;
3058
3059         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3060                                                      &quote))) {
3061                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3062                         BT_DBG("skb %p len %d", skb, skb->len);
3063                         hci_send_frame(skb);
3064
3065                         conn->sent++;
3066                         if (conn->sent == ~0)
3067                                 conn->sent = 0;
3068                 }
3069         }
3070 }
3071
3072 static void hci_sched_le(struct hci_dev *hdev)
3073 {
3074         struct hci_chan *chan;
3075         struct sk_buff *skb;
3076         int quote, cnt, tmp;
3077
3078         BT_DBG("%s", hdev->name);
3079
3080         if (!hci_conn_num(hdev, LE_LINK))
3081                 return;
3082
3083         if (!test_bit(HCI_RAW, &hdev->flags)) {
3084                 /* LE tx timeout must be longer than maximum
3085                  * link supervision timeout (40.9 seconds) */
3086                 if (!hdev->le_cnt && hdev->le_pkts &&
3087                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3088                         hci_link_tx_to(hdev, LE_LINK);
3089         }
3090
3091         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3092         tmp = cnt;
3093         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3094                 u32 priority = (skb_peek(&chan->data_q))->priority;
3095                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3096                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3097                                skb->len, skb->priority);
3098
3099                         /* Stop if priority has changed */
3100                         if (skb->priority < priority)
3101                                 break;
3102
3103                         skb = skb_dequeue(&chan->data_q);
3104
3105                         hci_send_frame(skb);
3106                         hdev->le_last_tx = jiffies;
3107
3108                         cnt--;
3109                         chan->sent++;
3110                         chan->conn->sent++;
3111                 }
3112         }
3113
3114         if (hdev->le_pkts)
3115                 hdev->le_cnt = cnt;
3116         else
3117                 hdev->acl_cnt = cnt;
3118
3119         if (cnt != tmp)
3120                 hci_prio_recalculate(hdev, LE_LINK);
3121 }
3122
3123 static void hci_tx_work(struct work_struct *work)
3124 {
3125         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3126         struct sk_buff *skb;
3127
3128         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3129                hdev->sco_cnt, hdev->le_cnt);
3130
3131         /* Schedule queues and send pending frames to the HCI driver */
3132
3133         hci_sched_acl(hdev);
3134
3135         hci_sched_sco(hdev);
3136
3137         hci_sched_esco(hdev);
3138
3139         hci_sched_le(hdev);
3140
3141         /* Send all queued raw (unknown type) packets */
3142         while ((skb = skb_dequeue(&hdev->raw_q)))
3143                 hci_send_frame(skb);
3144 }
3145
3146 /* ----- HCI RX task (incoming data processing) ----- */
3147
3148 /* ACL data packet */
3149 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3150 {
3151         struct hci_acl_hdr *hdr = (void *) skb->data;
3152         struct hci_conn *conn;
3153         __u16 handle, flags;
3154
3155         skb_pull(skb, HCI_ACL_HDR_SIZE);
3156
3157         handle = __le16_to_cpu(hdr->handle);
3158         flags  = hci_flags(handle);
3159         handle = hci_handle(handle);
3160
3161         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3162                handle, flags);
3163
3164         hdev->stat.acl_rx++;
3165
3166         hci_dev_lock(hdev);
3167         conn = hci_conn_hash_lookup_handle(hdev, handle);
3168         hci_dev_unlock(hdev);
3169
3170         if (conn) {
3171                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3172
3173                 /* Send to upper protocol */
3174                 l2cap_recv_acldata(conn, skb, flags);
3175                 return;
3176         } else {
3177                 BT_ERR("%s ACL packet for unknown connection handle %d",
3178                        hdev->name, handle);
3179         }
3180
3181         kfree_skb(skb);
3182 }
3183
3184 /* SCO data packet */
3185 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3186 {
3187         struct hci_sco_hdr *hdr = (void *) skb->data;
3188         struct hci_conn *conn;
3189         __u16 handle;
3190
3191         skb_pull(skb, HCI_SCO_HDR_SIZE);
3192
3193         handle = __le16_to_cpu(hdr->handle);
3194
3195         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3196
3197         hdev->stat.sco_rx++;
3198
3199         hci_dev_lock(hdev);
3200         conn = hci_conn_hash_lookup_handle(hdev, handle);
3201         hci_dev_unlock(hdev);
3202
3203         if (conn) {
3204                 /* Send to upper protocol */
3205                 sco_recv_scodata(conn, skb);
3206                 return;
3207         } else {
3208                 BT_ERR("%s SCO packet for unknown connection handle %d",
3209                        hdev->name, handle);
3210         }
3211
3212         kfree_skb(skb);
3213 }
3214
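/* A request is complete when the command at the head of cmd_q (if
 * any) starts a new request; req.start is set on the first command
 * of a request by hci_req_add() and on every stand-alone command by
 * hci_send_cmd().
 */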
3215 static bool hci_req_is_complete(struct hci_dev *hdev)
3216 {
3217         struct sk_buff *skb;
3218
3219         skb = skb_peek(&hdev->cmd_q);
3220         if (!skb)
3221                 return true;
3222
3223         return bt_cb(skb)->req.start;
3224 }
3225
3226 static void hci_resend_last(struct hci_dev *hdev)
3227 {
3228         struct hci_command_hdr *sent;
3229         struct sk_buff *skb;
3230         u16 opcode;
3231
3232         if (!hdev->sent_cmd)
3233                 return;
3234
3235         sent = (void *) hdev->sent_cmd->data;
3236         opcode = __le16_to_cpu(sent->opcode);
3237         if (opcode == HCI_OP_RESET)
3238                 return;
3239
3240         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3241         if (!skb)
3242                 return;
3243
3244         skb_queue_head(&hdev->cmd_q, skb);
3245         queue_work(hdev->workqueue, &hdev->cmd_work);
3246 }
3247
3248 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3249 {
3250         hci_req_complete_t req_complete = NULL;
3251         struct sk_buff *skb;
3252         unsigned long flags;
3253
3254         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3255
3256         /* If the completed command doesn't match the last one that was
3257          * sent, we need to do special handling of it.
3258          */
3259         if (!hci_sent_cmd_data(hdev, opcode)) {
3260                 /* Some CSR based controllers generate a spontaneous
3261                  * reset complete event during init and any pending
3262                  * command will never be completed. In such a case we
3263                  * need to resend whatever was the last sent
3264                  * command.
3265                  */
3266                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3267                         hci_resend_last(hdev);
3268
3269                 return;
3270         }
3271
3272         /* If the command succeeded and there are still more commands in
3273          * this request, the request is not yet complete.
3274          */
3275         if (!status && !hci_req_is_complete(hdev))
3276                 return;
3277
3278         /* If this was the last command in a request, the complete
3279          * callback is found in hdev->sent_cmd instead of the
3280          * command queue (hdev->cmd_q).
3281          */
3282         if (hdev->sent_cmd) {
3283                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3284                 if (req_complete)
3285                         goto call_complete;
3286         }
3287
3288         /* Remove all pending commands belonging to this request */
3289         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3290         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3291                 if (bt_cb(skb)->req.start) {
3292                         __skb_queue_head(&hdev->cmd_q, skb);
3293                         break;
3294                 }
3295
3296                 req_complete = bt_cb(skb)->req.complete;
3297                 kfree_skb(skb);
3298         }
3299         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3300
3301 call_complete:
3302         if (req_complete)
3303                 req_complete(hdev, status);
3304 }
3305
3306 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3307 {
3308         hci_req_complete_t req_complete = NULL;
3309
3310         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3311
3312         if (status) {
3313                 hci_req_cmd_complete(hdev, opcode, status);
3314                 return;
3315         }
3316
3317         /* No need to handle success status if there are more commands */
3318         if (!hci_req_is_complete(hdev))
3319                 return;
3320
3321         if (hdev->sent_cmd)
3322                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3323
3324         /* If the request doesn't have a complete callback or there
3325          * are other commands/requests in the hdev queue, we consider
3326          * this request completed.
3327          */
3328         if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3329                 hci_req_cmd_complete(hdev, opcode, status);
3330 }
3331
3332 static void hci_rx_work(struct work_struct *work)
3333 {
3334         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3335         struct sk_buff *skb;
3336
3337         BT_DBG("%s", hdev->name);
3338
3339         while ((skb = skb_dequeue(&hdev->rx_q))) {
3340                 /* Send copy to monitor */
3341                 hci_send_to_monitor(hdev, skb);
3342
3343                 if (atomic_read(&hdev->promisc)) {
3344                         /* Send copy to the sockets */
3345                         hci_send_to_sock(hdev, skb);
3346                 }
3347
3348                 if (test_bit(HCI_RAW, &hdev->flags)) {
3349                         kfree_skb(skb);
3350                         continue;
3351                 }
3352
3353                 if (test_bit(HCI_INIT, &hdev->flags)) {
3354                         /* Don't process data packets in this state. */
3355                         switch (bt_cb(skb)->pkt_type) {
3356                         case HCI_ACLDATA_PKT:
3357                         case HCI_SCODATA_PKT:
3358                                 kfree_skb(skb);
3359                                 continue;
3360                         }
3361                 }
3362
3363                 /* Process frame */
3364                 switch (bt_cb(skb)->pkt_type) {
3365                 case HCI_EVENT_PKT:
3366                         BT_DBG("%s Event packet", hdev->name);
3367                         hci_event_packet(hdev, skb);
3368                         break;
3369
3370                 case HCI_ACLDATA_PKT:
3371                         BT_DBG("%s ACL data packet", hdev->name);
3372                         hci_acldata_packet(hdev, skb);
3373                         break;
3374
3375                 case HCI_SCODATA_PKT:
3376                         BT_DBG("%s SCO data packet", hdev->name);
3377                         hci_scodata_packet(hdev, skb);
3378                         break;
3379
3380                 default:
3381                         kfree_skb(skb);
3382                         break;
3383                 }
3384         }
3385 }
3386
3387 static void hci_cmd_work(struct work_struct *work)
3388 {
3389         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3390         struct sk_buff *skb;
3391
3392         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3393                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3394
3395         /* Send queued commands */
3396         if (atomic_read(&hdev->cmd_cnt)) {
3397                 skb = skb_dequeue(&hdev->cmd_q);
3398                 if (!skb)
3399                         return;
3400
3401                 kfree_skb(hdev->sent_cmd);
3402
3403                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3404                 if (hdev->sent_cmd) {
3405                         atomic_dec(&hdev->cmd_cnt);
3406                         hci_send_frame(skb);
3407                         if (test_bit(HCI_RESET, &hdev->flags))
3408                                 del_timer(&hdev->cmd_timer);
3409                         else
3410                                 mod_timer(&hdev->cmd_timer,
3411                                           jiffies + HCI_CMD_TIMEOUT);
3412                 } else {
3413                         skb_queue_head(&hdev->cmd_q, skb);
3414                         queue_work(hdev->workqueue, &hdev->cmd_work);
3415                 }
3416         }
3417 }
3418
3419 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3420 {
3421         /* General inquiry access code (GIAC) */
3422         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3423         struct hci_cp_inquiry cp;
3424
3425         BT_DBG("%s", hdev->name);
3426
3427         if (test_bit(HCI_INQUIRY, &hdev->flags))
3428                 return -EINPROGRESS;
3429
3430         inquiry_cache_flush(hdev);
3431
3432         memset(&cp, 0, sizeof(cp));
3433         memcpy(&cp.lap, lap, sizeof(cp.lap));
3434         cp.length  = length;
3435
3436         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3437 }
3438
3439 int hci_cancel_inquiry(struct hci_dev *hdev)
3440 {
3441         BT_DBG("%s", hdev->name);
3442
3443         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3444                 return -EALREADY;
3445
3446         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3447 }
3448
3449 u8 bdaddr_to_le(u8 bdaddr_type)
3450 {
3451         switch (bdaddr_type) {
3452         case BDADDR_LE_PUBLIC:
3453                 return ADDR_LE_DEV_PUBLIC;
3454
3455         default:
3456                 /* Fall back to the LE Random address type */
3457                 return ADDR_LE_DEV_RANDOM;
3458         }
3459 }