Bluetooth: AMP: Handle AMP_LINK connection
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check if the completed command matches
	 * the last init command; if it doesn't, resend the last command if
	 * needed (see the CSR quirk below) or just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
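
/*
 * Illustrative sketch (not part of the original file): how a caller
 * drives the synchronous request machinery above. The request callback
 * only *sends* commands; __hci_request() then sleeps on req_wait_q until
 * hci_req_complete() marks the request HCI_REQ_DONE, or it times out.
 * hci_example_req() and hci_example_voice_setting() are hypothetical
 * names used only for this example.
 */
static void hci_example_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 setting = cpu_to_le16(opt);

	/* Only queues the command; completion arrives asynchronously */
	hci_send_cmd(hdev, HCI_OP_WRITE_VOICE_SETTING, sizeof(setting),
		     &setting);
}

static int hci_example_voice_setting(struct hci_dev *hdev, __u16 setting)
{
	/* hci_request() checks HCI_UP and serializes via hci_req_lock() */
	return hci_request(hdev, hci_example_req, setting, HCI_INIT_TIMEOUT);
}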

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
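
/*
 * Illustrative sketch (not in the original file): hci_dev_get() returns
 * the device with its reference count raised, so every successful call
 * must be balanced with hci_dev_put() once the caller is done with it.
 * hci_example_with_dev() is a hypothetical caller.
 */
static int hci_example_with_dev(int index)
{
	struct hci_dev *hdev;

	hdev = hci_dev_get(index);	/* takes a reference, or NULL */
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s looked up", hdev->name);

	hci_dev_put(hdev);		/* drop the reference taken above */
	return 0;
}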

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
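
/*
 * Illustrative user-space sketch (not part of this file): hci_inquiry()
 * backs the HCIINQUIRY ioctl. The caller hands in a single buffer
 * holding a struct hci_inquiry_req header followed by room for the
 * inquiry_info results, mirroring the copy_to_user() layout above.
 * Error handling is trimmed for brevity.
 */
#if 0	/* user-space example, kept out of the kernel build */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int ctl_sock, int dev_id)
{
	struct hci_inquiry_req *ir;
	void *buf;

	/* header + space for up to 255 responses, as the kernel assumes */
	buf = malloc(sizeof(*ir) + 255 * sizeof(inquiry_info));
	if (!buf)
		return -1;

	ir = buf;
	ir->dev_id  = dev_id;
	ir->flags   = IREQ_CACHE_FLUSH;	/* force a fresh inquiry */
	ir->lap[0]  = 0x33;		/* GIAC: 0x9e8b33 */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;
	ir->length  = 8;		/* 8 * 1.28 s inquiry length */
	ir->num_rsp = 0;		/* 0 = unlimited (capped at 255) */

	if (ioctl(ctl_sock, HCIINQUIRY, (unsigned long) buf) < 0) {
		free(buf);
		return -1;
	}

	/* ir->num_rsp now holds the number of inquiry_info entries */
	free(buf);
	return 0;
}
#endif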

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
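
/*
 * Illustrative sketch (not in the original file): HCISETACLMTU and
 * HCISETSCOMTU pack two 16-bit values into the 32-bit dr.dev_opt --
 * packet count in the low half, MTU in the high half -- which is what
 * the pointer arithmetic above unpacks on a little-endian host.
 * hci_example_pack_mtu() is a hypothetical helper.
 */
static inline __u32 hci_example_pack_mtu(__u16 mtu, __u16 pkts)
{
	/* dev_opt = MTU in the upper 16 bits, packet count in the lower */
	return ((__u32) mtu << 16) | pkts;
}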

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
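
/*
 * Reference note (added, not in the original file): the magic
 * auth_type/remote_auth numbers above are the SSP authentication
 * requirements from the Bluetooth core specification:
 *   0x00 No Bonding              0x01 No Bonding + MITM
 *   0x02 Dedicated Bonding       0x03 Dedicated Bonding + MITM
 *   0x04 General Bonding         0x05 General Bonding + MITM
 * so "> 0x01" above means some form of bonding was requested.
 */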

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
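
/*
 * Illustrative sketch (not in the original file): a caller such as the
 * mgmt code starts an LE scan with interval/window in 0.625 ms units
 * and a timeout in milliseconds. The scan itself runs from
 * system_long_wq and is torn down later by le_scan_disable_work().
 * The numeric values below are examples only.
 */
static int hci_example_start_le_scan(struct hci_dev *hdev)
{
	/* ~10 ms interval and window (0x0010 * 0.625 ms), 10 s duration */
	return hci_le_scan(hdev, 0x01 /* active scan */, 0x0010, 0x0010,
			   10000);
}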

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
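
/*
 * Illustrative driver-side sketch (not in the original file): a
 * transport driver allocates an hci_dev, fills in its callbacks and bus
 * type, and registers it. The hci_example_* callbacks are hypothetical
 * driver functions, declared here only to keep the sketch
 * self-contained.
 */
static int hci_example_open(struct hci_dev *hdev);
static int hci_example_close(struct hci_dev *hdev);
static int hci_example_flush(struct hci_dev *hdev);
static int hci_example_send(struct sk_buff *skb);

static int hci_example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus      = HCI_USB;
	hdev->dev_type = HCI_BREDR;
	hdev->open     = hci_example_open;	/* mandatory */
	hdev->close    = hci_example_close;	/* mandatory */
	hdev->flush    = hci_example_flush;	/* optional */
	hdev->send     = hci_example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}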
1773
1774 /* Unregister HCI device */
1775 void hci_unregister_dev(struct hci_dev *hdev)
1776 {
1777         int i, id;
1778
1779         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1780
1781         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1782
1783         id = hdev->id;
1784
1785         write_lock(&hci_dev_list_lock);
1786         list_del(&hdev->list);
1787         write_unlock(&hci_dev_list_lock);
1788
1789         hci_dev_do_close(hdev);
1790
1791         for (i = 0; i < NUM_REASSEMBLY; i++)
1792                 kfree_skb(hdev->reassembly[i]);
1793
1794         if (!test_bit(HCI_INIT, &hdev->flags) &&
1795             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1796                 hci_dev_lock(hdev);
1797                 mgmt_index_removed(hdev);
1798                 hci_dev_unlock(hdev);
1799         }
1800
1801         /* mgmt_index_removed should take care of emptying the
1802          * pending list */
1803         BUG_ON(!list_empty(&hdev->mgmt_pending));
1804
1805         hci_notify(hdev, HCI_DEV_UNREG);
1806
1807         if (hdev->rfkill) {
1808                 rfkill_unregister(hdev->rfkill);
1809                 rfkill_destroy(hdev->rfkill);
1810         }
1811
1812         hci_del_sysfs(hdev);
1813
1814         destroy_workqueue(hdev->workqueue);
1815
1816         hci_dev_lock(hdev);
1817         hci_blacklist_clear(hdev);
1818         hci_uuids_clear(hdev);
1819         hci_link_keys_clear(hdev);
1820         hci_smp_ltks_clear(hdev);
1821         hci_remote_oob_data_clear(hdev);
1822         hci_dev_unlock(hdev);
1823
1824         hci_dev_put(hdev);
1825
1826         ida_simple_remove(&hci_index_ida, id);
1827 }
1828 EXPORT_SYMBOL(hci_unregister_dev);
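
/* Illustrative sketch (hypothetical driver): teardown mirrors probe.
 * hci_unregister_dev() must run before hci_free_dev(), which only
 * drops the final reference taken at registration time.
 */
static void example_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}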
1829
1830 /* Suspend HCI device */
1831 int hci_suspend_dev(struct hci_dev *hdev)
1832 {
1833         hci_notify(hdev, HCI_DEV_SUSPEND);
1834         return 0;
1835 }
1836 EXPORT_SYMBOL(hci_suspend_dev);
1837
1838 /* Resume HCI device */
1839 int hci_resume_dev(struct hci_dev *hdev)
1840 {
1841         hci_notify(hdev, HCI_DEV_RESUME);
1842         return 0;
1843 }
1844 EXPORT_SYMBOL(hci_resume_dev);
1845
1846 /* Receive frame from HCI drivers */
1847 int hci_recv_frame(struct sk_buff *skb)
1848 {
1849         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1850         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1851                       && !test_bit(HCI_INIT, &hdev->flags))) {
1852                 kfree_skb(skb);
1853                 return -ENXIO;
1854         }
1855
1856         /* Incoming skb */
1857         bt_cb(skb)->incoming = 1;
1858
1859         /* Time stamp */
1860         __net_timestamp(skb);
1861
1862         skb_queue_tail(&hdev->rx_q, skb);
1863         queue_work(hdev->workqueue, &hdev->rx_work);
1864
1865         return 0;
1866 }
1867 EXPORT_SYMBOL(hci_recv_frame);
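
/* Illustrative sketch (hypothetical names): a driver delivers one
 * complete HCI packet to the core. skb->dev must point at the hci_dev
 * and pkt_type must be set, exactly as hci_recv_frame() expects; the
 * skb is consumed even on error.
 */
static int example_rx(struct hci_dev *hdev, const void *buf, int len,
		      __u8 type)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = type;	/* e.g. HCI_EVENT_PKT */

	return hci_recv_frame(skb);
}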
1868
1869 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1870                           int count, __u8 index)
1871 {
1872         int len = 0;
1873         int hlen = 0;
1874         int remain = count;
1875         struct sk_buff *skb;
1876         struct bt_skb_cb *scb;
1877
1878         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1879             index >= NUM_REASSEMBLY)
1880                 return -EILSEQ;
1881
1882         skb = hdev->reassembly[index];
1883
1884         if (!skb) {
1885                 switch (type) {
1886                 case HCI_ACLDATA_PKT:
1887                         len = HCI_MAX_FRAME_SIZE;
1888                         hlen = HCI_ACL_HDR_SIZE;
1889                         break;
1890                 case HCI_EVENT_PKT:
1891                         len = HCI_MAX_EVENT_SIZE;
1892                         hlen = HCI_EVENT_HDR_SIZE;
1893                         break;
1894                 case HCI_SCODATA_PKT:
1895                         len = HCI_MAX_SCO_SIZE;
1896                         hlen = HCI_SCO_HDR_SIZE;
1897                         break;
1898                 }
1899
1900                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1901                 if (!skb)
1902                         return -ENOMEM;
1903
1904                 scb = (void *) skb->cb;
1905                 scb->expect = hlen;
1906                 scb->pkt_type = type;
1907
1908                 skb->dev = (void *) hdev;
1909                 hdev->reassembly[index] = skb;
1910         }
1911
1912         while (count) {
1913                 scb = (void *) skb->cb;
1914                 len = min_t(uint, scb->expect, count);
1915
1916                 memcpy(skb_put(skb, len), data, len);
1917
1918                 count -= len;
1919                 data += len;
1920                 scb->expect -= len;
1921                 remain = count;
1922
1923                 switch (type) {
1924                 case HCI_EVENT_PKT:
1925                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1926                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1927                                 scb->expect = h->plen;
1928
1929                                 if (skb_tailroom(skb) < scb->expect) {
1930                                         kfree_skb(skb);
1931                                         hdev->reassembly[index] = NULL;
1932                                         return -ENOMEM;
1933                                 }
1934                         }
1935                         break;
1936
1937                 case HCI_ACLDATA_PKT:
1938                         if (skb->len == HCI_ACL_HDR_SIZE) {
1939                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1940                                 scb->expect = __le16_to_cpu(h->dlen);
1941
1942                                 if (skb_tailroom(skb) < scb->expect) {
1943                                         kfree_skb(skb);
1944                                         hdev->reassembly[index] = NULL;
1945                                         return -ENOMEM;
1946                                 }
1947                         }
1948                         break;
1949
1950                 case HCI_SCODATA_PKT:
1951                         if (skb->len == HCI_SCO_HDR_SIZE) {
1952                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1953                                 scb->expect = h->dlen;
1954
1955                                 if (skb_tailroom(skb) < scb->expect) {
1956                                         kfree_skb(skb);
1957                                         hdev->reassembly[index] = NULL;
1958                                         return -ENOMEM;
1959                                 }
1960                         }
1961                         break;
1962                 }
1963
1964                 if (scb->expect == 0) {
1965                         /* Complete frame */
1966
1967                         bt_cb(skb)->pkt_type = type;
1968                         hci_recv_frame(skb);
1969
1970                         hdev->reassembly[index] = NULL;
1971                         return remain;
1972                 }
1973         }
1974
1975         return remain;
1976 }
1977
1978 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1979 {
1980         int rem = 0;
1981
1982         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1983                 return -EILSEQ;
1984
1985         while (count) {
1986                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1987                 if (rem < 0)
1988                         return rem;
1989
1990                 data += (count - rem);
1991                 count = rem;
1992         }
1993
1994         return rem;
1995 }
1996 EXPORT_SYMBOL(hci_recv_fragment);
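
/* Illustrative sketch (hypothetical names): a UART-style driver may
 * push arbitrarily chunked bytes of a known packet type; partial
 * frames are parked in hdev->reassembly[type - 1] between calls.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	int err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);

	if (err < 0)
		BT_ERR("%s reassembly failed (%d)", hdev->name, err);
}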
1997
1998 #define STREAM_REASSEMBLY 0
1999
2000 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2001 {
2002         int type;
2003         int rem = 0;
2004
2005         while (count) {
2006                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2007
2008                 if (!skb) {
2009                         struct { char type; } *pkt;
2010
2011                         /* Start of the frame */
2012                         pkt = data;
2013                         type = pkt->type;
2014
2015                         data++;
2016                         count--;
2017                 } else
2018                         type = bt_cb(skb)->pkt_type;
2019
2020                 rem = hci_reassembly(hdev, type, data, count,
2021                                      STREAM_REASSEMBLY);
2022                 if (rem < 0)
2023                         return rem;
2024
2025                 data += (count - rem);
2026                 count = rem;
2027         }
2028
2029         return rem;
2030 }
2031 EXPORT_SYMBOL(hci_recv_stream_fragment);
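
/* Illustrative sketch (hypothetical names): with the stream variant
 * the leading H:4 packet-type byte stays in the byte stream and is
 * parsed above, so the caller forwards raw bytes untouched.
 */
static void example_h4_rx(struct hci_dev *hdev, void *buf, int len)
{
	int err = hci_recv_stream_fragment(hdev, buf, len);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}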
2032
2033 /* ---- Interface to upper protocols ---- */
2034
2035 int hci_register_cb(struct hci_cb *cb)
2036 {
2037         BT_DBG("%p name %s", cb, cb->name);
2038
2039         write_lock(&hci_cb_list_lock);
2040         list_add(&cb->list, &hci_cb_list);
2041         write_unlock(&hci_cb_list_lock);
2042
2043         return 0;
2044 }
2045 EXPORT_SYMBOL(hci_register_cb);
2046
2047 int hci_unregister_cb(struct hci_cb *cb)
2048 {
2049         BT_DBG("%p name %s", cb, cb->name);
2050
2051         write_lock(&hci_cb_list_lock);
2052         list_del(&cb->list);
2053         write_unlock(&hci_cb_list_lock);
2054
2055         return 0;
2056 }
2057 EXPORT_SYMBOL(hci_unregister_cb);
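
/* Illustrative sketch: an upper protocol hooks in by registering a
 * struct hci_cb. Only the name is filled in here; the confirmation
 * callbacks the struct can carry are left out of this sketch.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}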
2058
2059 static int hci_send_frame(struct sk_buff *skb)
2060 {
2061         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2062
2063         if (!hdev) {
2064                 kfree_skb(skb);
2065                 return -ENODEV;
2066         }
2067
2068         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2069
2070         /* Time stamp */
2071         __net_timestamp(skb);
2072
2073         /* Send copy to monitor */
2074         hci_send_to_monitor(hdev, skb);
2075
2076         if (atomic_read(&hdev->promisc)) {
2077                 /* Send copy to the sockets */
2078                 hci_send_to_sock(hdev, skb);
2079         }
2080
2081         /* Get rid of skb owner, prior to sending to the driver. */
2082         skb_orphan(skb);
2083
2084         return hdev->send(skb);
2085 }
2086
2087 /* Send HCI command */
2088 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2089 {
2090         int len = HCI_COMMAND_HDR_SIZE + plen;
2091         struct hci_command_hdr *hdr;
2092         struct sk_buff *skb;
2093
2094         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2095
2096         skb = bt_skb_alloc(len, GFP_ATOMIC);
2097         if (!skb) {
2098                 BT_ERR("%s no memory for command", hdev->name);
2099                 return -ENOMEM;
2100         }
2101
2102         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2103         hdr->opcode = cpu_to_le16(opcode);
2104         hdr->plen   = plen;
2105
2106         if (plen)
2107                 memcpy(skb_put(skb, plen), param, plen);
2108
2109         BT_DBG("skb len %d", skb->len);
2110
2111         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2112         skb->dev = (void *) hdev;
2113
2114         if (test_bit(HCI_INIT, &hdev->flags))
2115                 hdev->init_last_cmd = opcode;
2116
2117         skb_queue_tail(&hdev->cmd_q, skb);
2118         queue_work(hdev->workqueue, &hdev->cmd_work);
2119
2120         return 0;
2121 }
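
/* Illustrative sketch (hypothetical helper): queueing a command via
 * hci_send_cmd(). This mirrors hci_do_inquiry() further down; the
 * single parameter byte here is SCAN_INQUIRY and/or SCAN_PAGE.
 */
static int example_write_scan_enable(struct hci_dev *hdev, __u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
}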
2122
2123 /* Get data from the previously sent command */
2124 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2125 {
2126         struct hci_command_hdr *hdr;
2127
2128         if (!hdev->sent_cmd)
2129                 return NULL;
2130
2131         hdr = (void *) hdev->sent_cmd->data;
2132
2133         if (hdr->opcode != cpu_to_le16(opcode))
2134                 return NULL;
2135
2136         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2137
2138         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2139 }
2140
2141 /* Send ACL data */
2142 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2143 {
2144         struct hci_acl_hdr *hdr;
2145         int len = skb->len;
2146
2147         skb_push(skb, HCI_ACL_HDR_SIZE);
2148         skb_reset_transport_header(skb);
2149         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2150         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2151         hdr->dlen   = cpu_to_le16(len);
2152 }
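
/* Worked example (illustrative): hci_handle_pack(0x0042, ACL_START)
 * yields 0x2042: the 12-bit handle occupies the low bits and the
 * packet boundary/broadcast flags are shifted into bits 12-15, sent
 * on the wire as one little-endian 16-bit field.
 */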
2153
2154 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2155                           struct sk_buff *skb, __u16 flags)
2156 {
2157         struct hci_conn *conn = chan->conn;
2158         struct hci_dev *hdev = conn->hdev;
2159         struct sk_buff *list;
2160
2161         skb->len = skb_headlen(skb);
2162         skb->data_len = 0;
2163
2164         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2165         hci_add_acl_hdr(skb, conn->handle, flags);
2166
2167         list = skb_shinfo(skb)->frag_list;
2168         if (!list) {
2169                 /* Non-fragmented */
2170                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2171
2172                 skb_queue_tail(queue, skb);
2173         } else {
2174                 /* Fragmented */
2175                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2176
2177                 skb_shinfo(skb)->frag_list = NULL;
2178
2179                 /* Queue all fragments atomically */
2180                 spin_lock(&queue->lock);
2181
2182                 __skb_queue_tail(queue, skb);
2183
2184                 flags &= ~ACL_START;
2185                 flags |= ACL_CONT;
2186                 do {
2187                         skb = list; list = list->next;
2188
2189                         skb->dev = (void *) hdev;
2190                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2191                         hci_add_acl_hdr(skb, conn->handle, flags);
2192
2193                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2194
2195                         __skb_queue_tail(queue, skb);
2196                 } while (list);
2197
2198                 spin_unlock(&queue->lock);
2199         }
2200 }
2201
2202 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2203 {
2204         struct hci_dev *hdev = chan->conn->hdev;
2205
2206         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2207
2208         skb->dev = (void *) hdev;
2209
2210         hci_queue_acl(chan, &chan->data_q, skb, flags);
2211
2212         queue_work(hdev->workqueue, &hdev->tx_work);
2213 }
2214
2215 /* Send SCO data */
2216 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2217 {
2218         struct hci_dev *hdev = conn->hdev;
2219         struct hci_sco_hdr hdr;
2220
2221         BT_DBG("%s len %d", hdev->name, skb->len);
2222
2223         hdr.handle = cpu_to_le16(conn->handle);
2224         hdr.dlen   = skb->len;
2225
2226         skb_push(skb, HCI_SCO_HDR_SIZE);
2227         skb_reset_transport_header(skb);
2228         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2229
2230         skb->dev = (void *) hdev;
2231         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2232
2233         skb_queue_tail(&conn->data_q, skb);
2234         queue_work(hdev->workqueue, &hdev->tx_work);
2235 }
2236
2237 /* ---- HCI TX task (outgoing data) ---- */
2238
2239 /* HCI Connection scheduler */
2240 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2241                                      int *quote)
2242 {
2243         struct hci_conn_hash *h = &hdev->conn_hash;
2244         struct hci_conn *conn = NULL, *c;
2245         unsigned int num = 0, min = ~0;
2246
2247         /* We don't have to lock the device here. Connections are always
2248          * added and removed with the TX task disabled. */
2249
2250         rcu_read_lock();
2251
2252         list_for_each_entry_rcu(c, &h->list, list) {
2253                 if (c->type != type || skb_queue_empty(&c->data_q))
2254                         continue;
2255
2256                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2257                         continue;
2258
2259                 num++;
2260
2261                 if (c->sent < min) {
2262                         min  = c->sent;
2263                         conn = c;
2264                 }
2265
2266                 if (hci_conn_num(hdev, type) == num)
2267                         break;
2268         }
2269
2270         rcu_read_unlock();
2271
2272         if (conn) {
2273                 int cnt, q;
2274
2275                 switch (conn->type) {
2276                 case ACL_LINK:
2277                         cnt = hdev->acl_cnt;
2278                         break;
2279                 case SCO_LINK:
2280                 case ESCO_LINK:
2281                         cnt = hdev->sco_cnt;
2282                         break;
2283                 case LE_LINK:
2284                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2285                         break;
2286                 default:
2287                         cnt = 0;
2288                         BT_ERR("Unknown link type");
2289                 }
2290
2291                 q = cnt / num;
2292                 *quote = q ? q : 1;
2293         } else
2294                 *quote = 0;
2295
2296         BT_DBG("conn %p quote %d", conn, *quote);
2297         return conn;
2298 }
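
/* Worked example (illustrative): with hdev->sco_cnt == 8 free buffers
 * and num == 3 eligible SCO connections, q = 8 / 3 = 2, so the
 * connection with the fewest packets in flight (min c->sent) may send
 * two packets this round; when q == 0 the quote is still 1, so a busy
 * link always makes progress.
 */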
2299
2300 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2301 {
2302         struct hci_conn_hash *h = &hdev->conn_hash;
2303         struct hci_conn *c;
2304
2305         BT_ERR("%s link tx timeout", hdev->name);
2306
2307         rcu_read_lock();
2308
2309         /* Kill stalled connections */
2310         list_for_each_entry_rcu(c, &h->list, list) {
2311                 if (c->type == type && c->sent) {
2312                         BT_ERR("%s killing stalled connection %pMR",
2313                                hdev->name, &c->dst);
2314                         hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2315                 }
2316         }
2317
2318         rcu_read_unlock();
2319 }
2320
2321 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2322                                       int *quote)
2323 {
2324         struct hci_conn_hash *h = &hdev->conn_hash;
2325         struct hci_chan *chan = NULL;
2326         unsigned int num = 0, min = ~0, cur_prio = 0;
2327         struct hci_conn *conn;
2328         int cnt, q, conn_num = 0;
2329
2330         BT_DBG("%s", hdev->name);
2331
2332         rcu_read_lock();
2333
2334         list_for_each_entry_rcu(conn, &h->list, list) {
2335                 struct hci_chan *tmp;
2336
2337                 if (conn->type != type)
2338                         continue;
2339
2340                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2341                         continue;
2342
2343                 conn_num++;
2344
2345                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2346                         struct sk_buff *skb;
2347
2348                         if (skb_queue_empty(&tmp->data_q))
2349                                 continue;
2350
2351                         skb = skb_peek(&tmp->data_q);
2352                         if (skb->priority < cur_prio)
2353                                 continue;
2354
2355                         if (skb->priority > cur_prio) {
2356                                 num = 0;
2357                                 min = ~0;
2358                                 cur_prio = skb->priority;
2359                         }
2360
2361                         num++;
2362
2363                         if (conn->sent < min) {
2364                                 min  = conn->sent;
2365                                 chan = tmp;
2366                         }
2367                 }
2368
2369                 if (hci_conn_num(hdev, type) == conn_num)
2370                         break;
2371         }
2372
2373         rcu_read_unlock();
2374
2375         if (!chan)
2376                 return NULL;
2377
2378         switch (chan->conn->type) {
2379         case ACL_LINK:
2380                 cnt = hdev->acl_cnt;
2381                 break;
2382         case AMP_LINK:
2383                 cnt = hdev->block_cnt;
2384                 break;
2385         case SCO_LINK:
2386         case ESCO_LINK:
2387                 cnt = hdev->sco_cnt;
2388                 break;
2389         case LE_LINK:
2390                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2391                 break;
2392         default:
2393                 cnt = 0;
2394                 BT_ERR("Unknown link type");
2395         }
2396
2397         q = cnt / num;
2398         *quote = q ? q : 1;
2399         BT_DBG("chan %p quote %d", chan, *quote);
2400         return chan;
2401 }
2402
2403 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2404 {
2405         struct hci_conn_hash *h = &hdev->conn_hash;
2406         struct hci_conn *conn;
2407         int num = 0;
2408
2409         BT_DBG("%s", hdev->name);
2410
2411         rcu_read_lock();
2412
2413         list_for_each_entry_rcu(conn, &h->list, list) {
2414                 struct hci_chan *chan;
2415
2416                 if (conn->type != type)
2417                         continue;
2418
2419                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2420                         continue;
2421
2422                 num++;
2423
2424                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2425                         struct sk_buff *skb;
2426
2427                         if (chan->sent) {
2428                                 chan->sent = 0;
2429                                 continue;
2430                         }
2431
2432                         if (skb_queue_empty(&chan->data_q))
2433                                 continue;
2434
2435                         skb = skb_peek(&chan->data_q);
2436                         if (skb->priority >= HCI_PRIO_MAX - 1)
2437                                 continue;
2438
2439                         skb->priority = HCI_PRIO_MAX - 1;
2440
2441                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2442                                skb->priority);
2443                 }
2444
2445                 if (hci_conn_num(hdev, type) == num)
2446                         break;
2447         }
2448
2449         rcu_read_unlock();
2450
2451 }
2452
2453 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2454 {
2455         /* Calculate count of blocks used by this packet */
2456         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2457 }
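
/* Worked example (illustrative): with hdev->block_len == 64 and an
 * ACL packet of skb->len == 304 (4-byte header + 300-byte payload),
 * DIV_ROUND_UP(300, 64) == 5 data blocks are consumed.
 */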
2458
2459 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2460 {
2461         if (!test_bit(HCI_RAW, &hdev->flags)) {
2462                 /* ACL tx timeout must be longer than the maximum
2463                  * link supervision timeout (40.9 seconds) */
2464                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2465                                        HCI_ACL_TX_TIMEOUT))
2466                         hci_link_tx_to(hdev, ACL_LINK);
2467         }
2468 }
2469
2470 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2471 {
2472         unsigned int cnt = hdev->acl_cnt;
2473         struct hci_chan *chan;
2474         struct sk_buff *skb;
2475         int quote;
2476
2477         __check_timeout(hdev, cnt);
2478
2479         while (hdev->acl_cnt &&
2480                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2481                 u32 priority = (skb_peek(&chan->data_q))->priority;
2482                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2483                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2484                                skb->len, skb->priority);
2485
2486                         /* Stop if priority has changed */
2487                         if (skb->priority < priority)
2488                                 break;
2489
2490                         skb = skb_dequeue(&chan->data_q);
2491
2492                         hci_conn_enter_active_mode(chan->conn,
2493                                                    bt_cb(skb)->force_active);
2494
2495                         hci_send_frame(skb);
2496                         hdev->acl_last_tx = jiffies;
2497
2498                         hdev->acl_cnt--;
2499                         chan->sent++;
2500                         chan->conn->sent++;
2501                 }
2502         }
2503
2504         if (cnt != hdev->acl_cnt)
2505                 hci_prio_recalculate(hdev, ACL_LINK);
2506 }
2507
2508 static void hci_sched_acl_blk(struct hci_dev *hdev)
2509 {
2510         unsigned int cnt = hdev->block_cnt;
2511         struct hci_chan *chan;
2512         struct sk_buff *skb;
2513         int quote;
2514         u8 type;
2515
2516         __check_timeout(hdev, cnt);
2517
2518         BT_DBG("%s", hdev->name);
2519
2520         if (hdev->dev_type == HCI_AMP)
2521                 type = AMP_LINK;
2522         else
2523                 type = ACL_LINK;
2524
2525         while (hdev->block_cnt > 0 &&
2526                (chan = hci_chan_sent(hdev, type, &quote))) {
2527                 u32 priority = (skb_peek(&chan->data_q))->priority;
2528                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2529                         int blocks;
2530
2531                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2532                                skb->len, skb->priority);
2533
2534                         /* Stop if priority has changed */
2535                         if (skb->priority < priority)
2536                                 break;
2537
2538                         skb = skb_dequeue(&chan->data_q);
2539
2540                         blocks = __get_blocks(hdev, skb);
2541                         if (blocks > hdev->block_cnt)
2542                                 return;
2543
2544                         hci_conn_enter_active_mode(chan->conn,
2545                                                    bt_cb(skb)->force_active);
2546
2547                         hci_send_frame(skb);
2548                         hdev->acl_last_tx = jiffies;
2549
2550                         hdev->block_cnt -= blocks;
2551                         quote -= blocks;
2552
2553                         chan->sent += blocks;
2554                         chan->conn->sent += blocks;
2555                 }
2556         }
2557
2558         if (cnt != hdev->block_cnt)
2559                 hci_prio_recalculate(hdev, type);
2560 }
2561
2562 static void hci_sched_acl(struct hci_dev *hdev)
2563 {
2564         BT_DBG("%s", hdev->name);
2565
2566         /* No ACL link over BR/EDR controller */
2567         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2568                 return;
2569
2570         /* No AMP link over AMP controller */
2571         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2572                 return;
2573
2574         switch (hdev->flow_ctl_mode) {
2575         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2576                 hci_sched_acl_pkt(hdev);
2577                 break;
2578
2579         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2580                 hci_sched_acl_blk(hdev);
2581                 break;
2582         }
2583 }
2584
2585 /* Schedule SCO */
2586 static void hci_sched_sco(struct hci_dev *hdev)
2587 {
2588         struct hci_conn *conn;
2589         struct sk_buff *skb;
2590         int quote;
2591
2592         BT_DBG("%s", hdev->name);
2593
2594         if (!hci_conn_num(hdev, SCO_LINK))
2595                 return;
2596
2597         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2598                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2599                         BT_DBG("skb %p len %d", skb, skb->len);
2600                         hci_send_frame(skb);
2601
2602                         conn->sent++;
2603                         if (conn->sent == ~0)
2604                                 conn->sent = 0;
2605                 }
2606         }
2607 }
2608
2609 static void hci_sched_esco(struct hci_dev *hdev)
2610 {
2611         struct hci_conn *conn;
2612         struct sk_buff *skb;
2613         int quote;
2614
2615         BT_DBG("%s", hdev->name);
2616
2617         if (!hci_conn_num(hdev, ESCO_LINK))
2618                 return;
2619
2620         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2621                                                      &quote))) {
2622                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2623                         BT_DBG("skb %p len %d", skb, skb->len);
2624                         hci_send_frame(skb);
2625
2626                         conn->sent++;
2627                         if (conn->sent == ~0)
2628                                 conn->sent = 0;
2629                 }
2630         }
2631 }
2632
2633 static void hci_sched_le(struct hci_dev *hdev)
2634 {
2635         struct hci_chan *chan;
2636         struct sk_buff *skb;
2637         int quote, cnt, tmp;
2638
2639         BT_DBG("%s", hdev->name);
2640
2641         if (!hci_conn_num(hdev, LE_LINK))
2642                 return;
2643
2644         if (!test_bit(HCI_RAW, &hdev->flags)) {
2645                 /* LE tx timeout must be longer than the maximum
2646                  * link supervision timeout (40.9 seconds) */
2647                 if (!hdev->le_cnt && hdev->le_pkts &&
2648                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
2649                         hci_link_tx_to(hdev, LE_LINK);
2650         }
2651
2652         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2653         tmp = cnt;
2654         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2655                 u32 priority = (skb_peek(&chan->data_q))->priority;
2656                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2657                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2658                                skb->len, skb->priority);
2659
2660                         /* Stop if priority has changed */
2661                         if (skb->priority < priority)
2662                                 break;
2663
2664                         skb = skb_dequeue(&chan->data_q);
2665
2666                         hci_send_frame(skb);
2667                         hdev->le_last_tx = jiffies;
2668
2669                         cnt--;
2670                         chan->sent++;
2671                         chan->conn->sent++;
2672                 }
2673         }
2674
2675         if (hdev->le_pkts)
2676                 hdev->le_cnt = cnt;
2677         else
2678                 hdev->acl_cnt = cnt;
2679
2680         if (cnt != tmp)
2681                 hci_prio_recalculate(hdev, LE_LINK);
2682 }
2683
2684 static void hci_tx_work(struct work_struct *work)
2685 {
2686         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2687         struct sk_buff *skb;
2688
2689         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2690                hdev->sco_cnt, hdev->le_cnt);
2691
2692         /* Schedule queues and send stuff to HCI driver */
2693
2694         hci_sched_acl(hdev);
2695
2696         hci_sched_sco(hdev);
2697
2698         hci_sched_esco(hdev);
2699
2700         hci_sched_le(hdev);
2701
2702         /* Send next queued raw (unknown type) packet */
2703         while ((skb = skb_dequeue(&hdev->raw_q)))
2704                 hci_send_frame(skb);
2705 }
2706
2707 /* ----- HCI RX task (incoming data processing) ----- */
2708
2709 /* ACL data packet */
2710 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2711 {
2712         struct hci_acl_hdr *hdr = (void *) skb->data;
2713         struct hci_conn *conn;
2714         __u16 handle, flags;
2715
2716         skb_pull(skb, HCI_ACL_HDR_SIZE);
2717
2718         handle = __le16_to_cpu(hdr->handle);
2719         flags  = hci_flags(handle);
2720         handle = hci_handle(handle);
2721
2722         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2723                handle, flags);
2724
2725         hdev->stat.acl_rx++;
2726
2727         hci_dev_lock(hdev);
2728         conn = hci_conn_hash_lookup_handle(hdev, handle);
2729         hci_dev_unlock(hdev);
2730
2731         if (conn) {
2732                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2733
2734                 hci_dev_lock(hdev);
2735                 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2736                     !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2737                         mgmt_device_connected(hdev, &conn->dst, conn->type,
2738                                               conn->dst_type, 0, NULL, 0,
2739                                               conn->dev_class);
2740                 hci_dev_unlock(hdev);
2741
2742                 /* Send to upper protocol */
2743                 l2cap_recv_acldata(conn, skb, flags);
2744                 return;
2745         } else {
2746                 BT_ERR("%s ACL packet for unknown connection handle %d",
2747                        hdev->name, handle);
2748         }
2749
2750         kfree_skb(skb);
2751 }
2752
2753 /* SCO data packet */
2754 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2755 {
2756         struct hci_sco_hdr *hdr = (void *) skb->data;
2757         struct hci_conn *conn;
2758         __u16 handle;
2759
2760         skb_pull(skb, HCI_SCO_HDR_SIZE);
2761
2762         handle = __le16_to_cpu(hdr->handle);
2763
2764         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2765
2766         hdev->stat.sco_rx++;
2767
2768         hci_dev_lock(hdev);
2769         conn = hci_conn_hash_lookup_handle(hdev, handle);
2770         hci_dev_unlock(hdev);
2771
2772         if (conn) {
2773                 /* Send to upper protocol */
2774                 sco_recv_scodata(conn, skb);
2775                 return;
2776         } else {
2777                 BT_ERR("%s SCO packet for unknown connection handle %d",
2778                        hdev->name, handle);
2779         }
2780
2781         kfree_skb(skb);
2782 }
2783
2784 static void hci_rx_work(struct work_struct *work)
2785 {
2786         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2787         struct sk_buff *skb;
2788
2789         BT_DBG("%s", hdev->name);
2790
2791         while ((skb = skb_dequeue(&hdev->rx_q))) {
2792                 /* Send copy to monitor */
2793                 hci_send_to_monitor(hdev, skb);
2794
2795                 if (atomic_read(&hdev->promisc)) {
2796                         /* Send copy to the sockets */
2797                         hci_send_to_sock(hdev, skb);
2798                 }
2799
2800                 if (test_bit(HCI_RAW, &hdev->flags)) {
2801                         kfree_skb(skb);
2802                         continue;
2803                 }
2804
2805                 if (test_bit(HCI_INIT, &hdev->flags)) {
2806                         /* Don't process data packets in this state. */
2807                         switch (bt_cb(skb)->pkt_type) {
2808                         case HCI_ACLDATA_PKT:
2809                         case HCI_SCODATA_PKT:
2810                                 kfree_skb(skb);
2811                                 continue;
2812                         }
2813                 }
2814
2815                 /* Process frame */
2816                 switch (bt_cb(skb)->pkt_type) {
2817                 case HCI_EVENT_PKT:
2818                         BT_DBG("%s Event packet", hdev->name);
2819                         hci_event_packet(hdev, skb);
2820                         break;
2821
2822                 case HCI_ACLDATA_PKT:
2823                         BT_DBG("%s ACL data packet", hdev->name);
2824                         hci_acldata_packet(hdev, skb);
2825                         break;
2826
2827                 case HCI_SCODATA_PKT:
2828                         BT_DBG("%s SCO data packet", hdev->name);
2829                         hci_scodata_packet(hdev, skb);
2830                         break;
2831
2832                 default:
2833                         kfree_skb(skb);
2834                         break;
2835                 }
2836         }
2837 }
2838
2839 static void hci_cmd_work(struct work_struct *work)
2840 {
2841         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2842         struct sk_buff *skb;
2843
2844         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2845                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2846
2847         /* Send queued commands */
2848         if (atomic_read(&hdev->cmd_cnt)) {
2849                 skb = skb_dequeue(&hdev->cmd_q);
2850                 if (!skb)
2851                         return;
2852
2853                 kfree_skb(hdev->sent_cmd);
2854
2855                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2856                 if (hdev->sent_cmd) {
2857                         atomic_dec(&hdev->cmd_cnt);
2858                         hci_send_frame(skb);
2859                         if (test_bit(HCI_RESET, &hdev->flags))
2860                                 del_timer(&hdev->cmd_timer);
2861                         else
2862                                 mod_timer(&hdev->cmd_timer,
2863                                           jiffies + HCI_CMD_TIMEOUT);
2864                 } else {
2865                         skb_queue_head(&hdev->cmd_q, skb);
2866                         queue_work(hdev->workqueue, &hdev->cmd_work);
2867                 }
2868         }
2869 }
2870
2871 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2872 {
2873         /* General inquiry access code (GIAC) */
2874         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2875         struct hci_cp_inquiry cp;
2876
2877         BT_DBG("%s", hdev->name);
2878
2879         if (test_bit(HCI_INQUIRY, &hdev->flags))
2880                 return -EINPROGRESS;
2881
2882         inquiry_cache_flush(hdev);
2883
2884         memset(&cp, 0, sizeof(cp));
2885         memcpy(&cp.lap, lap, sizeof(cp.lap));
2886         cp.length  = length;
2887
2888         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2889 }
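
/* Illustrative note: the LAP above is the GIAC 0x9E8B33 stored
 * little-endian, and cp.length is in units of 1.28 s, so for example
 * hci_do_inquiry(hdev, 0x08) scans for roughly 10.24 seconds.
 */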
2890
2891 int hci_cancel_inquiry(struct hci_dev *hdev)
2892 {
2893         BT_DBG("%s", hdev->name);
2894
2895         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2896                 return -EALREADY;
2897
2898         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2899 }
2900
2901 u8 bdaddr_to_le(u8 bdaddr_type)
2902 {
2903         switch (bdaddr_type) {
2904         case BDADDR_LE_PUBLIC:
2905                 return ADDR_LE_DEV_PUBLIC;
2906
2907         default:
2908                 /* Fallback to LE Random address type */
2909                 return ADDR_LE_DEV_RANDOM;
2910         }
2911 }