Bluetooth: Read list of local codecs supported by the controller
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
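This change has the HCI init sequence query the controller for the codecs it supports: during the fourth init stage the core now issues the Read Local Supported Codecs command, gated on the corresponding bit of the supported-commands mask (hdev->commands[29] & 0x20). The hunk lives in hci_init4_req() below.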
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

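/* The boolean debugfs attributes in this file follow the usual
 * convention: reads return "Y\n" or "N\n", and writes accept the
 * strings understood by strtobool() ('1'/'0', 'y'/'n').
 */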
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

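/* Entering Device Under Test (DUT) mode sends the HCI Enable Device
 * Under Test Mode command; the controller only leaves DUT mode on an
 * HCI Reset, which is why the disable path issues HCI_OP_RESET.
 */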
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

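/* The simple numeric attributes below are wired up with
 * DEFINE_SIMPLE_ATTRIBUTE(), which builds a file_operations from a
 * getter, an optional setter and a printf-style format string.
 */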
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

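/* hdev->idle_timeout is in milliseconds: zero disables the idle
 * timer, otherwise values from 500 ms up to one hour are accepted.
 */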
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

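/* LE connection intervals are in units of 1.25 ms, so the valid
 * range 0x0006-0x0c80 corresponds to 7.5 ms through 4 seconds.
 */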
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

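/* The LE supervision timeout is in units of 10 ms, so the valid
 * range 0x000a-0x0c80 corresponds to 100 ms through 32 seconds.
 */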
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

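/* The advertising channel map is a three-bit mask: bit 0 enables
 * channel 37, bit 1 channel 38 and bit 2 channel 39; at least one
 * channel must stay enabled.
 */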
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

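/* Send a single HCI command and sleep until its completion arrives.
 * When a specific event code is given, that event ends the request;
 * otherwise the Command Complete event for the opcode is expected.
 */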
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

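/* A minimal usage sketch (the caller is expected to hold the request
 * lock via hci_req_lock(), as the debugfs code above does):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... skb->data now holds the command's return parameters,
 *      here a struct hci_rp_read_local_version ...
 *      kfree_skb(skb);
 */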
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command is
         * marked as supported. If not supported, assume that the controller
1623          * does not have actual support for stored link keys which makes this
1624          * command redundant anyway.
1625          *
1626          * Some controllers indicate that they support handling deleting
1627          * stored link keys, but they don't. The quirk lets a driver
1628          * just disable this command.
1629          */
1630         if (hdev->commands[6] & 0x80 &&
1631             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1632                 struct hci_cp_delete_stored_link_key cp;
1633
1634                 bacpy(&cp.bdaddr, BDADDR_ANY);
1635                 cp.delete_all = 0x01;
1636                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1637                             sizeof(cp), &cp);
1638         }
1639
1640         if (hdev->commands[5] & 0x10)
1641                 hci_setup_link_policy(req);
1642
1643         if (lmp_le_capable(hdev)) {
1644                 u8 events[8];
1645
1646                 memset(events, 0, sizeof(events));
1647                 events[0] = 0x0f;
1648
1649                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650                         events[0] |= 0x10;      /* LE Long Term Key Request */
1651
1652                 /* If controller supports the Connection Parameters Request
1653                  * Link Layer Procedure, enable the corresponding event.
1654                  */
1655                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656                         events[0] |= 0x20;      /* LE Remote Connection
1657                                                  * Parameter Request
1658                                                  */
1659
1660                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1661                             events);
1662
1663                 if (hdev->commands[25] & 0x40) {
1664                         /* Read LE Advertising Channel TX Power */
1665                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1666                 }
1667
1668                 hci_set_le_support(req);
1669         }
1670
1671         /* Read features beyond page 1 if available */
1672         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673                 struct hci_cp_read_local_ext_features cp;
1674
1675                 cp.page = p;
1676                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1677                             sizeof(cp), &cp);
1678         }
1679 }
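
/*
 * Illustrative note, not part of the original file: the supported
 * commands bit mask used above is indexed by octet and bit, so the
 * hdev->commands[6] & 0x80 check tests octet 6, bit 7 and the
 * hdev->commands[5] & 0x10 check tests octet 5, bit 4. A hypothetical
 * helper expressing the same test generically could look like this:
 */
static inline bool example_hci_cmd_supported(struct hci_dev *hdev,
                                             unsigned int octet,
                                             unsigned int bit)
{
        return hdev->commands[octet] & (1 << bit);
}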
1680
1681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1682 {
1683         struct hci_dev *hdev = req->hdev;
1684
1685         /* Set event mask page 2 if the HCI command for it is supported */
1686         if (hdev->commands[22] & 0x04)
1687                 hci_set_event_mask_page_2(req);
1688
1689         /* Read local codec list if the HCI command is supported */
1690         if (hdev->commands[29] & 0x20)
1691                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1692
1693         /* Check for Synchronization Train support */
1694         if (lmp_sync_train_capable(hdev))
1695                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1696
1697         /* Enable Secure Connections if supported and configured */
1698         if ((lmp_sc_capable(hdev) ||
1699              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1700             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1701                 u8 support = 0x01;
1702                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1703                             sizeof(support), &support);
1704         }
1705 }
1706
1707 static int __hci_init(struct hci_dev *hdev)
1708 {
1709         int err;
1710
1711         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1712         if (err < 0)
1713                 return err;
1714
1715         /* The Device Under Test (DUT) mode is special and available for
1716          * all controller types. So just create it early on.
1717          */
1718         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1719                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1720                                     &dut_mode_fops);
1721         }
1722
        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
1727         if (hdev->dev_type != HCI_BREDR)
1728                 return 0;
1729
1730         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1731         if (err < 0)
1732                 return err;
1733
1734         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1735         if (err < 0)
1736                 return err;
1737
1738         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1739         if (err < 0)
1740                 return err;
1741
1742         /* Only create debugfs entries during the initial setup
1743          * phase and not every time the controller gets powered on.
1744          */
1745         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1746                 return 0;
1747
1748         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1749                             &features_fops);
1750         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1751                            &hdev->manufacturer);
1752         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1753         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1754         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1755                             &blacklist_fops);
1756         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1757                             &whitelist_fops);
1758         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1759
1760         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1761                             &conn_info_min_age_fops);
1762         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1763                             &conn_info_max_age_fops);
1764
1765         if (lmp_bredr_capable(hdev)) {
1766                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1767                                     hdev, &inquiry_cache_fops);
1768                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1769                                     hdev, &link_keys_fops);
1770                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1771                                     hdev, &dev_class_fops);
1772                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1773                                     hdev, &voice_setting_fops);
1774         }
1775
1776         if (lmp_ssp_capable(hdev)) {
1777                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1778                                     hdev, &auto_accept_delay_fops);
1779                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1780                                     hdev, &force_sc_support_fops);
1781                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1782                                     hdev, &sc_only_mode_fops);
1783         }
1784
1785         if (lmp_sniff_capable(hdev)) {
1786                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1787                                     hdev, &idle_timeout_fops);
1788                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1789                                     hdev, &sniff_min_interval_fops);
1790                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1791                                     hdev, &sniff_max_interval_fops);
1792         }
1793
1794         if (lmp_le_capable(hdev)) {
1795                 debugfs_create_file("identity", 0400, hdev->debugfs,
1796                                     hdev, &identity_fops);
1797                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1798                                     hdev, &rpa_timeout_fops);
1799                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1800                                     hdev, &random_address_fops);
1801                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1802                                     hdev, &static_address_fops);
1803
1804                 /* For controllers with a public address, provide a debug
1805                  * option to force the usage of the configured static
1806                  * address. By default the public address is used.
1807                  */
1808                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1809                         debugfs_create_file("force_static_address", 0644,
1810                                             hdev->debugfs, hdev,
1811                                             &force_static_address_fops);
1812
1813                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1814                                   &hdev->le_white_list_size);
1815                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1816                                     &white_list_fops);
1817                 debugfs_create_file("identity_resolving_keys", 0400,
1818                                     hdev->debugfs, hdev,
1819                                     &identity_resolving_keys_fops);
1820                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1821                                     hdev, &long_term_keys_fops);
1822                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1823                                     hdev, &conn_min_interval_fops);
1824                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1825                                     hdev, &conn_max_interval_fops);
1826                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1827                                     hdev, &conn_latency_fops);
1828                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1829                                     hdev, &supervision_timeout_fops);
1830                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1831                                     hdev, &adv_channel_map_fops);
1832                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1833                                     &device_list_fops);
1834                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1835                                    hdev->debugfs,
1836                                    &hdev->discov_interleaved_timeout);
1837         }
1838
1839         return 0;
1840 }
1841
1842 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1843 {
1844         struct hci_dev *hdev = req->hdev;
1845
1846         BT_DBG("%s %ld", hdev->name, opt);
1847
1848         /* Reset */
1849         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1850                 hci_reset_req(req, 0);
1851
1852         /* Read Local Version */
1853         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1854
1855         /* Read BD Address */
1856         if (hdev->set_bdaddr)
1857                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1858 }
1859
1860 static int __hci_unconf_init(struct hci_dev *hdev)
1861 {
1862         int err;
1863
1864         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1865                 return 0;
1866
1867         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1868         if (err < 0)
1869                 return err;
1870
1871         return 0;
1872 }
1873
1874 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1875 {
1876         __u8 scan = opt;
1877
1878         BT_DBG("%s %x", req->hdev->name, scan);
1879
1880         /* Inquiry and Page scans */
1881         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1882 }
1883
1884 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1885 {
1886         __u8 auth = opt;
1887
1888         BT_DBG("%s %x", req->hdev->name, auth);
1889
1890         /* Authentication */
1891         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1892 }
1893
1894 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1895 {
1896         __u8 encrypt = opt;
1897
1898         BT_DBG("%s %x", req->hdev->name, encrypt);
1899
1900         /* Encryption */
1901         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1902 }
1903
1904 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1905 {
1906         __le16 policy = cpu_to_le16(opt);
1907
1908         BT_DBG("%s %x", req->hdev->name, policy);
1909
1910         /* Default link policy */
1911         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1912 }
1913
/* Get HCI device by index.
 * A reference to the device is held on return; release it with
 * hci_dev_put() when done.
 */
1916 struct hci_dev *hci_dev_get(int index)
1917 {
1918         struct hci_dev *hdev = NULL, *d;
1919
1920         BT_DBG("%d", index);
1921
1922         if (index < 0)
1923                 return NULL;
1924
1925         read_lock(&hci_dev_list_lock);
1926         list_for_each_entry(d, &hci_dev_list, list) {
1927                 if (d->id == index) {
1928                         hdev = hci_dev_hold(d);
1929                         break;
1930                 }
1931         }
1932         read_unlock(&hci_dev_list_lock);
1933         return hdev;
1934 }
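
/*
 * Usage sketch (hypothetical helper, not part of this file): every
 * successful hci_dev_get() must be balanced with hci_dev_put().
 */
static inline int example_read_dev_id(int index, __u16 *id)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        *id = hdev->id;

        /* Balance the reference taken by hci_dev_get() */
        hci_dev_put(hdev);
        return 0;
}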
1935
1936 /* ---- Inquiry support ---- */
1937
1938 bool hci_discovery_active(struct hci_dev *hdev)
1939 {
1940         struct discovery_state *discov = &hdev->discovery;
1941
1942         switch (discov->state) {
1943         case DISCOVERY_FINDING:
1944         case DISCOVERY_RESOLVING:
1945                 return true;
1946
1947         default:
1948                 return false;
1949         }
1950 }
1951
1952 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1953 {
1954         int old_state = hdev->discovery.state;
1955
1956         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1957
1958         if (old_state == state)
1959                 return;
1960
1961         hdev->discovery.state = state;
1962
1963         switch (state) {
1964         case DISCOVERY_STOPPED:
1965                 hci_update_background_scan(hdev);
1966
1967                 if (old_state != DISCOVERY_STARTING)
1968                         mgmt_discovering(hdev, 0);
1969                 break;
1970         case DISCOVERY_STARTING:
1971                 break;
1972         case DISCOVERY_FINDING:
1973                 mgmt_discovering(hdev, 1);
1974                 break;
1975         case DISCOVERY_RESOLVING:
1976                 break;
1977         case DISCOVERY_STOPPING:
1978                 break;
1979         }
1980 }
1981
1982 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1983 {
1984         struct discovery_state *cache = &hdev->discovery;
1985         struct inquiry_entry *p, *n;
1986
1987         list_for_each_entry_safe(p, n, &cache->all, all) {
1988                 list_del(&p->all);
1989                 kfree(p);
1990         }
1991
1992         INIT_LIST_HEAD(&cache->unknown);
1993         INIT_LIST_HEAD(&cache->resolve);
1994 }
1995
1996 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1997                                                bdaddr_t *bdaddr)
1998 {
1999         struct discovery_state *cache = &hdev->discovery;
2000         struct inquiry_entry *e;
2001
2002         BT_DBG("cache %p, %pMR", cache, bdaddr);
2003
2004         list_for_each_entry(e, &cache->all, all) {
2005                 if (!bacmp(&e->data.bdaddr, bdaddr))
2006                         return e;
2007         }
2008
2009         return NULL;
2010 }
2011
2012 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2013                                                        bdaddr_t *bdaddr)
2014 {
2015         struct discovery_state *cache = &hdev->discovery;
2016         struct inquiry_entry *e;
2017
2018         BT_DBG("cache %p, %pMR", cache, bdaddr);
2019
2020         list_for_each_entry(e, &cache->unknown, list) {
2021                 if (!bacmp(&e->data.bdaddr, bdaddr))
2022                         return e;
2023         }
2024
2025         return NULL;
2026 }
2027
2028 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2029                                                        bdaddr_t *bdaddr,
2030                                                        int state)
2031 {
2032         struct discovery_state *cache = &hdev->discovery;
2033         struct inquiry_entry *e;
2034
2035         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2036
2037         list_for_each_entry(e, &cache->resolve, list) {
2038                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2039                         return e;
2040                 if (!bacmp(&e->data.bdaddr, bdaddr))
2041                         return e;
2042         }
2043
2044         return NULL;
2045 }
2046
2047 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2048                                       struct inquiry_entry *ie)
2049 {
2050         struct discovery_state *cache = &hdev->discovery;
2051         struct list_head *pos = &cache->resolve;
2052         struct inquiry_entry *p;
2053
2054         list_del(&ie->list);
2055
2056         list_for_each_entry(p, &cache->resolve, list) {
2057                 if (p->name_state != NAME_PENDING &&
2058                     abs(p->data.rssi) >= abs(ie->data.rssi))
2059                         break;
2060                 pos = &p->list;
2061         }
2062
2063         list_add(&ie->list, pos);
2064 }
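
/*
 * Worked example (assumed RSSI values, for illustration only): with a
 * resolve list ordered [-40, -60, -75] (smallest |RSSI|, i.e. the
 * strongest signal, first), re-inserting an entry with RSSI -50 places
 * it between -40 and -60. Entries already in NAME_PENDING state are
 * skipped by the loop above, so a new entry is never inserted in front
 * of them.
 */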
2065
2066 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2067                              bool name_known)
2068 {
2069         struct discovery_state *cache = &hdev->discovery;
2070         struct inquiry_entry *ie;
2071         u32 flags = 0;
2072
2073         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2074
2075         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2076
2077         if (!data->ssp_mode)
2078                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2079
2080         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2081         if (ie) {
2082                 if (!ie->data.ssp_mode)
2083                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2084
2085                 if (ie->name_state == NAME_NEEDED &&
2086                     data->rssi != ie->data.rssi) {
2087                         ie->data.rssi = data->rssi;
2088                         hci_inquiry_cache_update_resolve(hdev, ie);
2089                 }
2090
2091                 goto update;
2092         }
2093
2094         /* Entry not in the cache. Add new one. */
2095         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2096         if (!ie) {
2097                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2098                 goto done;
2099         }
2100
2101         list_add(&ie->all, &cache->all);
2102
2103         if (name_known) {
2104                 ie->name_state = NAME_KNOWN;
2105         } else {
2106                 ie->name_state = NAME_NOT_KNOWN;
2107                 list_add(&ie->list, &cache->unknown);
2108         }
2109
2110 update:
2111         if (name_known && ie->name_state != NAME_KNOWN &&
2112             ie->name_state != NAME_PENDING) {
2113                 ie->name_state = NAME_KNOWN;
2114                 list_del(&ie->list);
2115         }
2116
2117         memcpy(&ie->data, data, sizeof(*data));
2118         ie->timestamp = jiffies;
2119         cache->timestamp = jiffies;
2120
2121         if (ie->name_state == NAME_NOT_KNOWN)
2122                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2123
2124 done:
2125         return flags;
2126 }
2127
2128 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2129 {
2130         struct discovery_state *cache = &hdev->discovery;
2131         struct inquiry_info *info = (struct inquiry_info *) buf;
2132         struct inquiry_entry *e;
2133         int copied = 0;
2134
2135         list_for_each_entry(e, &cache->all, all) {
2136                 struct inquiry_data *data = &e->data;
2137
2138                 if (copied >= num)
2139                         break;
2140
2141                 bacpy(&info->bdaddr, &data->bdaddr);
2142                 info->pscan_rep_mode    = data->pscan_rep_mode;
2143                 info->pscan_period_mode = data->pscan_period_mode;
2144                 info->pscan_mode        = data->pscan_mode;
2145                 memcpy(info->dev_class, data->dev_class, 3);
2146                 info->clock_offset      = data->clock_offset;
2147
2148                 info++;
2149                 copied++;
2150         }
2151
2152         BT_DBG("cache %p, copied %d", cache, copied);
2153         return copied;
2154 }
2155
2156 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2157 {
2158         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2159         struct hci_dev *hdev = req->hdev;
2160         struct hci_cp_inquiry cp;
2161
2162         BT_DBG("%s", hdev->name);
2163
2164         if (test_bit(HCI_INQUIRY, &hdev->flags))
2165                 return;
2166
2167         /* Start Inquiry */
2168         memcpy(&cp.lap, &ir->lap, 3);
2169         cp.length  = ir->length;
2170         cp.num_rsp = ir->num_rsp;
2171         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2172 }
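
/*
 * Minimal sketch (values assumed for illustration, not taken from this
 * file): an inquiry request using the General Inquiry Access Code
 * (GIAC, 0x9E8B33, stored little-endian in the LAP field).
 */
static inline void example_giac_inquiry(struct hci_request *req)
{
        struct hci_cp_inquiry cp;

        cp.lap[0] = 0x33;       /* GIAC = 0x9E8B33, little-endian */
        cp.lap[1] = 0x8b;
        cp.lap[2] = 0x9e;
        cp.length = 0x08;       /* inquiry length, units of 1.28s */
        cp.num_rsp = 0x00;      /* 0x00 means unlimited responses */

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}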
2173
2174 static int wait_inquiry(void *word)
2175 {
2176         schedule();
2177         return signal_pending(current);
2178 }
2179
2180 int hci_inquiry(void __user *arg)
2181 {
2182         __u8 __user *ptr = arg;
2183         struct hci_inquiry_req ir;
2184         struct hci_dev *hdev;
2185         int err = 0, do_inquiry = 0, max_rsp;
2186         long timeo;
2187         __u8 *buf;
2188
2189         if (copy_from_user(&ir, ptr, sizeof(ir)))
2190                 return -EFAULT;
2191
2192         hdev = hci_dev_get(ir.dev_id);
2193         if (!hdev)
2194                 return -ENODEV;
2195
2196         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2197                 err = -EBUSY;
2198                 goto done;
2199         }
2200
2201         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2202                 err = -EOPNOTSUPP;
2203                 goto done;
2204         }
2205
2206         if (hdev->dev_type != HCI_BREDR) {
2207                 err = -EOPNOTSUPP;
2208                 goto done;
2209         }
2210
2211         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2212                 err = -EOPNOTSUPP;
2213                 goto done;
2214         }
2215
2216         hci_dev_lock(hdev);
2217         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2218             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2219                 hci_inquiry_cache_flush(hdev);
2220                 do_inquiry = 1;
2221         }
2222         hci_dev_unlock(hdev);
2223
2224         timeo = ir.length * msecs_to_jiffies(2000);
2225
2226         if (do_inquiry) {
2227                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2228                                    timeo);
2229                 if (err < 0)
2230                         goto done;
2231
2232                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2233                  * cleared). If it is interrupted by a signal, return -EINTR.
2234                  */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE)) {
                        /* Don't leak the reference from hci_dev_get() */
                        err = -EINTR;
                        goto done;
                }
2238         }
2239
        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
2243         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2244
        /* inquiry_cache_dump() can't sleep. Therefore allocate a
         * temporary buffer and then copy it to user space.
         */
2248         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2249         if (!buf) {
2250                 err = -ENOMEM;
2251                 goto done;
2252         }
2253
2254         hci_dev_lock(hdev);
2255         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2256         hci_dev_unlock(hdev);
2257
2258         BT_DBG("num_rsp %d", ir.num_rsp);
2259
2260         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2261                 ptr += sizeof(ir);
2262                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2263                                  ir.num_rsp))
2264                         err = -EFAULT;
2265         } else
2266                 err = -EFAULT;
2267
2268         kfree(buf);
2269
2270 done:
2271         hci_dev_put(hdev);
2272         return err;
2273 }
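
/*
 * Hypothetical userspace sketch for the ioctl above (assumes the
 * HCIINQUIRY request code and struct hci_inquiry_req from the
 * Bluetooth UAPI headers): the kernel expects one buffer holding the
 * request header followed by room for the inquiry_info results.
 *
 *      void *buf = malloc(sizeof(struct hci_inquiry_req) +
 *                         255 * sizeof(struct inquiry_info));
 *      struct hci_inquiry_req *ir = buf;
 *
 *      ir->dev_id  = 0;
 *      ir->flags   = IREQ_CACHE_FLUSH;
 *      ir->lap[0]  = 0x33;     // GIAC, little-endian
 *      ir->lap[1]  = 0x8b;
 *      ir->lap[2]  = 0x9e;
 *      ir->length  = 8;
 *      ir->num_rsp = 0;        // 0 means "up to 255 responses"
 *
 *      err = ioctl(hci_sock_fd, HCIINQUIRY, buf);
 */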
2274
2275 static int hci_dev_do_open(struct hci_dev *hdev)
2276 {
2277         int ret = 0;
2278
2279         BT_DBG("%s %p", hdev->name, hdev);
2280
2281         hci_req_lock(hdev);
2282
2283         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2284                 ret = -ENODEV;
2285                 goto done;
2286         }
2287
2288         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2289             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2290                 /* Check for rfkill but allow the HCI setup stage to
2291                  * proceed (which in itself doesn't cause any RF activity).
2292                  */
2293                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2294                         ret = -ERFKILL;
2295                         goto done;
2296                 }
2297
                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed so it can
                 * be determined whether there is a public address
                 * or not.
2302                  *
2303                  * In case of user channel usage, it is not important
2304                  * if a public address or static random address is
2305                  * available.
2306                  *
2307                  * This check is only valid for BR/EDR controllers
2308                  * since AMP controllers do not have an address.
2309                  */
2310                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2311                     hdev->dev_type == HCI_BREDR &&
2312                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2313                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2314                         ret = -EADDRNOTAVAIL;
2315                         goto done;
2316                 }
2317         }
2318
2319         if (test_bit(HCI_UP, &hdev->flags)) {
2320                 ret = -EALREADY;
2321                 goto done;
2322         }
2323
2324         if (hdev->open(hdev)) {
2325                 ret = -EIO;
2326                 goto done;
2327         }
2328
2329         atomic_set(&hdev->cmd_cnt, 1);
2330         set_bit(HCI_INIT, &hdev->flags);
2331
2332         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2333                 if (hdev->setup)
2334                         ret = hdev->setup(hdev);
2335
2336                 /* The transport driver can set these quirks before
2337                  * creating the HCI device or in its setup callback.
2338                  *
2339                  * In case any of them is set, the controller has to
2340                  * start up as unconfigured.
2341                  */
2342                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2343                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2344                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2345
2346                 /* For an unconfigured controller it is required to
2347                  * read at least the version information provided by
2348                  * the Read Local Version Information command.
2349                  *
2350                  * If the set_bdaddr driver callback is provided, then
2351                  * also the original Bluetooth public device address
2352                  * will be read using the Read BD Address command.
2353                  */
2354                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2355                         ret = __hci_unconf_init(hdev);
2356         }
2357
2358         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2359                 /* If public address change is configured, ensure that
2360                  * the address gets programmed. If the driver does not
2361                  * support changing the public address, fail the power
2362                  * on procedure.
2363                  */
2364                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2365                     hdev->set_bdaddr)
2366                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2367                 else
2368                         ret = -EADDRNOTAVAIL;
2369         }
2370
2371         if (!ret) {
2372                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2373                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2374                         ret = __hci_init(hdev);
2375         }
2376
2377         clear_bit(HCI_INIT, &hdev->flags);
2378
2379         if (!ret) {
2380                 hci_dev_hold(hdev);
2381                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2382                 set_bit(HCI_UP, &hdev->flags);
2383                 hci_notify(hdev, HCI_DEV_UP);
2384                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2385                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2386                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2387                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2388                     hdev->dev_type == HCI_BREDR) {
2389                         hci_dev_lock(hdev);
2390                         mgmt_powered(hdev, 1);
2391                         hci_dev_unlock(hdev);
2392                 }
2393         } else {
2394                 /* Init failed, cleanup */
2395                 flush_work(&hdev->tx_work);
2396                 flush_work(&hdev->cmd_work);
2397                 flush_work(&hdev->rx_work);
2398
2399                 skb_queue_purge(&hdev->cmd_q);
2400                 skb_queue_purge(&hdev->rx_q);
2401
2402                 if (hdev->flush)
2403                         hdev->flush(hdev);
2404
2405                 if (hdev->sent_cmd) {
2406                         kfree_skb(hdev->sent_cmd);
2407                         hdev->sent_cmd = NULL;
2408                 }
2409
2410                 hdev->close(hdev);
2411                 hdev->flags &= BIT(HCI_RAW);
2412         }
2413
2414 done:
2415         hci_req_unlock(hdev);
2416         return ret;
2417 }
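
/*
 * Descriptive summary of the open sequence above: driver open() ->
 * optional driver setup() while HCI_SETUP is set (possibly followed by
 * __hci_unconf_init() for unconfigured controllers) -> set_bdaddr()
 * while HCI_CONFIG is set -> full __hci_init() for configured,
 * non-user-channel controllers -> HCI_UP set and mgmt_powered()
 * notified.
 */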
2418
2419 /* ---- HCI ioctl helpers ---- */
2420
2421 int hci_dev_open(__u16 dev)
2422 {
2423         struct hci_dev *hdev;
2424         int err;
2425
2426         hdev = hci_dev_get(dev);
2427         if (!hdev)
2428                 return -ENODEV;
2429
        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
2434          *
2435          * When this function is called for a user channel, the flag
2436          * HCI_USER_CHANNEL will be set first before attempting to
2437          * open the device.
2438          */
2439         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2440             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2441                 err = -EOPNOTSUPP;
2442                 goto done;
2443         }
2444
2445         /* We need to ensure that no other power on/off work is pending
2446          * before proceeding to call hci_dev_do_open. This is
2447          * particularly important if the setup procedure has not yet
2448          * completed.
2449          */
2450         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2451                 cancel_delayed_work(&hdev->power_off);
2452
2453         /* After this call it is guaranteed that the setup procedure
2454          * has finished. This means that error conditions like RFKILL
2455          * or no valid public or static random address apply.
2456          */
2457         flush_workqueue(hdev->req_workqueue);
2458
2459         /* For controllers not using the management interface and that
2460          * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2461          * so that pairing works for them. Once the management interface
2462          * is in use this bit will be cleared again and userspace has
2463          * to explicitly enable it.
2464          */
2465         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2466             !test_bit(HCI_MGMT, &hdev->dev_flags))
2467                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2468
2469         err = hci_dev_do_open(hdev);
2470
2471 done:
2472         hci_dev_put(hdev);
2473         return err;
2474 }
2475
/* This function requires the caller to hold hdev->lock */
2477 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2478 {
2479         struct hci_conn_params *p;
2480
2481         list_for_each_entry(p, &hdev->le_conn_params, list)
2482                 list_del_init(&p->action);
2483
2484         BT_DBG("All LE pending actions cleared");
2485 }
2486
2487 static int hci_dev_do_close(struct hci_dev *hdev)
2488 {
2489         BT_DBG("%s %p", hdev->name, hdev);
2490
2491         cancel_delayed_work(&hdev->power_off);
2492
2493         hci_req_cancel(hdev, ENODEV);
2494         hci_req_lock(hdev);
2495
2496         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2497                 cancel_delayed_work_sync(&hdev->cmd_timer);
2498                 hci_req_unlock(hdev);
2499                 return 0;
2500         }
2501
2502         /* Flush RX and TX works */
2503         flush_work(&hdev->tx_work);
2504         flush_work(&hdev->rx_work);
2505
2506         if (hdev->discov_timeout > 0) {
2507                 cancel_delayed_work(&hdev->discov_off);
2508                 hdev->discov_timeout = 0;
2509                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2510                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2511         }
2512
2513         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2514                 cancel_delayed_work(&hdev->service_cache);
2515
2516         cancel_delayed_work_sync(&hdev->le_scan_disable);
2517
2518         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2519                 cancel_delayed_work_sync(&hdev->rpa_expired);
2520
2521         hci_dev_lock(hdev);
2522         hci_inquiry_cache_flush(hdev);
2523         hci_conn_hash_flush(hdev);
2524         hci_pend_le_actions_clear(hdev);
2525         hci_dev_unlock(hdev);
2526
2527         hci_notify(hdev, HCI_DEV_DOWN);
2528
2529         if (hdev->flush)
2530                 hdev->flush(hdev);
2531
2532         /* Reset device */
2533         skb_queue_purge(&hdev->cmd_q);
2534         atomic_set(&hdev->cmd_cnt, 1);
2535         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2536             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2537             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2538                 set_bit(HCI_INIT, &hdev->flags);
2539                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2540                 clear_bit(HCI_INIT, &hdev->flags);
2541         }
2542
        /* Flush cmd work */
2544         flush_work(&hdev->cmd_work);
2545
2546         /* Drop queues */
2547         skb_queue_purge(&hdev->rx_q);
2548         skb_queue_purge(&hdev->cmd_q);
2549         skb_queue_purge(&hdev->raw_q);
2550
2551         /* Drop last sent command */
2552         if (hdev->sent_cmd) {
2553                 cancel_delayed_work_sync(&hdev->cmd_timer);
2554                 kfree_skb(hdev->sent_cmd);
2555                 hdev->sent_cmd = NULL;
2556         }
2557
2558         kfree_skb(hdev->recv_evt);
2559         hdev->recv_evt = NULL;
2560
        /* After this point our queues are empty and no tasks
         * are scheduled.
         */
2563         hdev->close(hdev);
2564
2565         /* Clear flags */
2566         hdev->flags &= BIT(HCI_RAW);
2567         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2568
2569         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2570                 if (hdev->dev_type == HCI_BREDR) {
2571                         hci_dev_lock(hdev);
2572                         mgmt_powered(hdev, 0);
2573                         hci_dev_unlock(hdev);
2574                 }
2575         }
2576
2577         /* Controller radio is available but is currently powered down */
2578         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2579
2580         memset(hdev->eir, 0, sizeof(hdev->eir));
2581         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2582         bacpy(&hdev->random_addr, BDADDR_ANY);
2583
2584         hci_req_unlock(hdev);
2585
2586         hci_dev_put(hdev);
2587         return 0;
2588 }
2589
2590 int hci_dev_close(__u16 dev)
2591 {
2592         struct hci_dev *hdev;
2593         int err;
2594
2595         hdev = hci_dev_get(dev);
2596         if (!hdev)
2597                 return -ENODEV;
2598
2599         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600                 err = -EBUSY;
2601                 goto done;
2602         }
2603
2604         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2605                 cancel_delayed_work(&hdev->power_off);
2606
2607         err = hci_dev_do_close(hdev);
2608
2609 done:
2610         hci_dev_put(hdev);
2611         return err;
2612 }
2613
2614 int hci_dev_reset(__u16 dev)
2615 {
2616         struct hci_dev *hdev;
2617         int ret = 0;
2618
2619         hdev = hci_dev_get(dev);
2620         if (!hdev)
2621                 return -ENODEV;
2622
2623         hci_req_lock(hdev);
2624
2625         if (!test_bit(HCI_UP, &hdev->flags)) {
2626                 ret = -ENETDOWN;
2627                 goto done;
2628         }
2629
2630         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2631                 ret = -EBUSY;
2632                 goto done;
2633         }
2634
2635         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2636                 ret = -EOPNOTSUPP;
2637                 goto done;
2638         }
2639
2640         /* Drop queues */
2641         skb_queue_purge(&hdev->rx_q);
2642         skb_queue_purge(&hdev->cmd_q);
2643
2644         hci_dev_lock(hdev);
2645         hci_inquiry_cache_flush(hdev);
2646         hci_conn_hash_flush(hdev);
2647         hci_dev_unlock(hdev);
2648
2649         if (hdev->flush)
2650                 hdev->flush(hdev);
2651
2652         atomic_set(&hdev->cmd_cnt, 1);
2653         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2654
2655         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2656
2657 done:
2658         hci_req_unlock(hdev);
2659         hci_dev_put(hdev);
2660         return ret;
2661 }
2662
2663 int hci_dev_reset_stat(__u16 dev)
2664 {
2665         struct hci_dev *hdev;
2666         int ret = 0;
2667
2668         hdev = hci_dev_get(dev);
2669         if (!hdev)
2670                 return -ENODEV;
2671
2672         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2673                 ret = -EBUSY;
2674                 goto done;
2675         }
2676
2677         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2678                 ret = -EOPNOTSUPP;
2679                 goto done;
2680         }
2681
2682         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2683
2684 done:
2685         hci_dev_put(hdev);
2686         return ret;
2687 }
2688
2689 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2690 {
2691         bool conn_changed, discov_changed;
2692
2693         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2694
2695         if ((scan & SCAN_PAGE))
2696                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2697                                                  &hdev->dev_flags);
2698         else
2699                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2700                                                   &hdev->dev_flags);
2701
2702         if ((scan & SCAN_INQUIRY)) {
2703                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2704                                                    &hdev->dev_flags);
2705         } else {
2706                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2707                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2708                                                     &hdev->dev_flags);
2709         }
2710
2711         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2712                 return;
2713
2714         if (conn_changed || discov_changed) {
2715                 /* In case this was disabled through mgmt */
2716                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2717
2718                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2719                         mgmt_update_adv_data(hdev);
2720
2721                 mgmt_new_settings(hdev);
2722         }
2723 }
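
/*
 * For reference (Write Scan Enable values, assumed here for
 * illustration): SCAN_INQUIRY is bit 0 and SCAN_PAGE is bit 1, so a
 * HCISETSCAN request with dev_opt = SCAN_PAGE | SCAN_INQUIRY makes the
 * device both connectable (HCI_CONNECTABLE) and discoverable
 * (HCI_DISCOVERABLE), while dev_opt = 0x00 clears both flags via the
 * logic above.
 */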
2724
2725 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2726 {
2727         struct hci_dev *hdev;
2728         struct hci_dev_req dr;
2729         int err = 0;
2730
2731         if (copy_from_user(&dr, arg, sizeof(dr)))
2732                 return -EFAULT;
2733
2734         hdev = hci_dev_get(dr.dev_id);
2735         if (!hdev)
2736                 return -ENODEV;
2737
2738         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2739                 err = -EBUSY;
2740                 goto done;
2741         }
2742
2743         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2744                 err = -EOPNOTSUPP;
2745                 goto done;
2746         }
2747
2748         if (hdev->dev_type != HCI_BREDR) {
2749                 err = -EOPNOTSUPP;
2750                 goto done;
2751         }
2752
2753         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2754                 err = -EOPNOTSUPP;
2755                 goto done;
2756         }
2757
2758         switch (cmd) {
2759         case HCISETAUTH:
2760                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2761                                    HCI_INIT_TIMEOUT);
2762                 break;
2763
2764         case HCISETENCRYPT:
2765                 if (!lmp_encrypt_capable(hdev)) {
2766                         err = -EOPNOTSUPP;
2767                         break;
2768                 }
2769
2770                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2771                         /* Auth must be enabled first */
2772                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2773                                            HCI_INIT_TIMEOUT);
2774                         if (err)
2775                                 break;
2776                 }
2777
2778                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2779                                    HCI_INIT_TIMEOUT);
2780                 break;
2781
2782         case HCISETSCAN:
2783                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2784                                    HCI_INIT_TIMEOUT);
2785
2786                 /* Ensure that the connectable and discoverable states
2787                  * get correctly modified as this was a non-mgmt change.
2788                  */
2789                 if (!err)
2790                         hci_update_scan_state(hdev, dr.dev_opt);
2791                 break;
2792
2793         case HCISETLINKPOL:
2794                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2795                                    HCI_INIT_TIMEOUT);
2796                 break;
2797
2798         case HCISETLINKMODE:
2799                 hdev->link_mode = ((__u16) dr.dev_opt) &
2800                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2801                 break;
2802
2803         case HCISETPTYPE:
2804                 hdev->pkt_type = (__u16) dr.dev_opt;
2805                 break;
2806
2807         case HCISETACLMTU:
2808                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2809                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2810                 break;
2811
2812         case HCISETSCOMTU:
2813                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2814                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2815                 break;
2816
2817         default:
2818                 err = -EINVAL;
2819                 break;
2820         }
2821
2822 done:
2823         hci_dev_put(hdev);
2824         return err;
2825 }
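
/*
 * Hypothetical userspace sketch for HCISETACLMTU above: dev_opt
 * carries two 16-bit values, the packet count in the low half and the
 * MTU in the high half, matching the pointer arithmetic above on a
 * little-endian host.
 *
 *      struct hci_dev_req dr = { .dev_id = 0 };
 *
 *      dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 *      err = ioctl(hci_sock_fd, HCISETACLMTU, &dr);
 */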
2826
2827 int hci_get_dev_list(void __user *arg)
2828 {
2829         struct hci_dev *hdev;
2830         struct hci_dev_list_req *dl;
2831         struct hci_dev_req *dr;
2832         int n = 0, size, err;
2833         __u16 dev_num;
2834
2835         if (get_user(dev_num, (__u16 __user *) arg))
2836                 return -EFAULT;
2837
2838         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2839                 return -EINVAL;
2840
2841         size = sizeof(*dl) + dev_num * sizeof(*dr);
2842
2843         dl = kzalloc(size, GFP_KERNEL);
2844         if (!dl)
2845                 return -ENOMEM;
2846
2847         dr = dl->dev_req;
2848
2849         read_lock(&hci_dev_list_lock);
2850         list_for_each_entry(hdev, &hci_dev_list, list) {
2851                 unsigned long flags = hdev->flags;
2852
                /* When auto-off is configured, the transport is
                 * running, but in that case still indicate that the
                 * device is actually down.
                 */
2857                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2858                         flags &= ~BIT(HCI_UP);
2859
2860                 (dr + n)->dev_id  = hdev->id;
2861                 (dr + n)->dev_opt = flags;
2862
2863                 if (++n >= dev_num)
2864                         break;
2865         }
2866         read_unlock(&hci_dev_list_lock);
2867
2868         dl->dev_num = n;
2869         size = sizeof(*dl) + n * sizeof(*dr);
2870
2871         err = copy_to_user(arg, dl, size);
2872         kfree(dl);
2873
2874         return err ? -EFAULT : 0;
2875 }
2876
2877 int hci_get_dev_info(void __user *arg)
2878 {
2879         struct hci_dev *hdev;
2880         struct hci_dev_info di;
2881         unsigned long flags;
2882         int err = 0;
2883
2884         if (copy_from_user(&di, arg, sizeof(di)))
2885                 return -EFAULT;
2886
2887         hdev = hci_dev_get(di.dev_id);
2888         if (!hdev)
2889                 return -ENODEV;
2890
        /* When auto-off is configured, the transport is running,
         * but in that case still indicate that the device is
         * actually down.
         */
2895         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2896                 flags = hdev->flags & ~BIT(HCI_UP);
2897         else
2898                 flags = hdev->flags;
2899
2900         strcpy(di.name, hdev->name);
2901         di.bdaddr   = hdev->bdaddr;
2902         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2903         di.flags    = flags;
2904         di.pkt_type = hdev->pkt_type;
2905         if (lmp_bredr_capable(hdev)) {
2906                 di.acl_mtu  = hdev->acl_mtu;
2907                 di.acl_pkts = hdev->acl_pkts;
2908                 di.sco_mtu  = hdev->sco_mtu;
2909                 di.sco_pkts = hdev->sco_pkts;
2910         } else {
2911                 di.acl_mtu  = hdev->le_mtu;
2912                 di.acl_pkts = hdev->le_pkts;
2913                 di.sco_mtu  = 0;
2914                 di.sco_pkts = 0;
2915         }
2916         di.link_policy = hdev->link_policy;
2917         di.link_mode   = hdev->link_mode;
2918
2919         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2920         memcpy(&di.features, &hdev->features, sizeof(di.features));
2921
2922         if (copy_to_user(arg, &di, sizeof(di)))
2923                 err = -EFAULT;
2924
2925         hci_dev_put(hdev);
2926
2927         return err;
2928 }
2929
2930 /* ---- Interface to HCI drivers ---- */
2931
2932 static int hci_rfkill_set_block(void *data, bool blocked)
2933 {
2934         struct hci_dev *hdev = data;
2935
2936         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2937
2938         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2939                 return -EBUSY;
2940
2941         if (blocked) {
2942                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2943                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2944                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2945                         hci_dev_do_close(hdev);
2946         } else {
2947                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2948         }
2949
2950         return 0;
2951 }
2952
2953 static const struct rfkill_ops hci_rfkill_ops = {
2954         .set_block = hci_rfkill_set_block,
2955 };
2956
2957 static void hci_power_on(struct work_struct *work)
2958 {
2959         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2960         int err;
2961
2962         BT_DBG("%s", hdev->name);
2963
2964         err = hci_dev_do_open(hdev);
2965         if (err < 0) {
2966                 mgmt_set_powered_failed(hdev, err);
2967                 return;
2968         }
2969
2970         /* During the HCI setup phase, a few error conditions are
2971          * ignored and they need to be checked now. If they are still
2972          * valid, it is important to turn the device back off.
2973          */
2974         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2975             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2976             (hdev->dev_type == HCI_BREDR &&
2977              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2978              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2979                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2980                 hci_dev_do_close(hdev);
2981         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2982                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2983                                    HCI_AUTO_OFF_TIMEOUT);
2984         }
2985
2986         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2987                 /* For unconfigured devices, set the HCI_RAW flag
2988                  * so that userspace can easily identify them.
2989                  */
2990                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2991                         set_bit(HCI_RAW, &hdev->flags);
2992
                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send the Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
3000                 mgmt_index_added(hdev);
3001         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Now that the controller is configured, it is
                 * important to clear the HCI_RAW flag.
                 */
3005                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3006                         clear_bit(HCI_RAW, &hdev->flags);
3007
3008                 /* Powering on the controller with HCI_CONFIG set only
3009                  * happens with the transition from unconfigured to
3010                  * configured. This will send the Index Added event.
3011                  */
3012                 mgmt_index_added(hdev);
3013         }
3014 }
3015
3016 static void hci_power_off(struct work_struct *work)
3017 {
3018         struct hci_dev *hdev = container_of(work, struct hci_dev,
3019                                             power_off.work);
3020
3021         BT_DBG("%s", hdev->name);
3022
3023         hci_dev_do_close(hdev);
3024 }
3025
3026 static void hci_discov_off(struct work_struct *work)
3027 {
3028         struct hci_dev *hdev;
3029
3030         hdev = container_of(work, struct hci_dev, discov_off.work);
3031
3032         BT_DBG("%s", hdev->name);
3033
3034         mgmt_discoverable_timeout(hdev);
3035 }
3036
3037 void hci_uuids_clear(struct hci_dev *hdev)
3038 {
3039         struct bt_uuid *uuid, *tmp;
3040
3041         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3042                 list_del(&uuid->list);
3043                 kfree(uuid);
3044         }
3045 }
3046
3047 void hci_link_keys_clear(struct hci_dev *hdev)
3048 {
3049         struct list_head *p, *n;
3050
3051         list_for_each_safe(p, n, &hdev->link_keys) {
3052                 struct link_key *key;
3053
3054                 key = list_entry(p, struct link_key, list);
3055
3056                 list_del(p);
3057                 kfree(key);
3058         }
3059 }
3060
3061 void hci_smp_ltks_clear(struct hci_dev *hdev)
3062 {
3063         struct smp_ltk *k, *tmp;
3064
3065         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3066                 list_del(&k->list);
3067                 kfree(k);
3068         }
3069 }
3070
3071 void hci_smp_irks_clear(struct hci_dev *hdev)
3072 {
3073         struct smp_irk *k, *tmp;
3074
3075         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3076                 list_del(&k->list);
3077                 kfree(k);
3078         }
3079 }
3080
3081 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3082 {
3083         struct link_key *k;
3084
3085         list_for_each_entry(k, &hdev->link_keys, list)
3086                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3087                         return k;
3088
3089         return NULL;
3090 }
3091
3092 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3093                                u8 key_type, u8 old_key_type)
3094 {
3095         /* Legacy key */
3096         if (key_type < 0x03)
3097                 return true;
3098
3099         /* Debug keys are insecure so don't store them persistently */
3100         if (key_type == HCI_LK_DEBUG_COMBINATION)
3101                 return false;
3102
3103         /* Changed combination key and there's no previous one */
3104         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3105                 return false;
3106
3107         /* Security mode 3 case */
3108         if (!conn)
3109                 return true;
3110
        /* Neither the local nor the remote side requested no-bonding */
3112         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3113                 return true;
3114
3115         /* Local side had dedicated bonding as requirement */
3116         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3117                 return true;
3118
3119         /* Remote side had dedicated bonding as requirement */
3120         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3121                 return true;
3122
        /* If none of the above criteria match, then don't store the key
         * persistently.
         */
3125         return false;
3126 }
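
/*
 * For reference (authentication requirements values from the Bluetooth
 * Core specification, summarized here): 0x00/0x01 request no bonding
 * (without/with MITM protection), 0x02/0x03 request dedicated bonding
 * and 0x04/0x05 request general bonding. This is why auth_type > 0x01
 * in hci_persistent_key() means that some form of bonding was
 * requested.
 */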
3127
3128 static u8 ltk_role(u8 type)
3129 {
3130         if (type == SMP_LTK)
3131                 return HCI_ROLE_MASTER;
3132
3133         return HCI_ROLE_SLAVE;
3134 }
3135
3136 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3137                              u8 role)
3138 {
3139         struct smp_ltk *k;
3140
3141         list_for_each_entry(k, &hdev->long_term_keys, list) {
3142                 if (k->ediv != ediv || k->rand != rand)
3143                         continue;
3144
3145                 if (ltk_role(k->type) != role)
3146                         continue;
3147
3148                 return k;
3149         }
3150
3151         return NULL;
3152 }
3153
3154 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3155                                      u8 addr_type, u8 role)
3156 {
3157         struct smp_ltk *k;
3158
3159         list_for_each_entry(k, &hdev->long_term_keys, list)
3160                 if (addr_type == k->bdaddr_type &&
3161                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3162                     ltk_role(k->type) == role)
3163                         return k;
3164
3165         return NULL;
3166 }
3167
3168 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3169 {
3170         struct smp_irk *irk;
3171
3172         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3173                 if (!bacmp(&irk->rpa, rpa))
3174                         return irk;
3175         }
3176
3177         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3178                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3179                         bacpy(&irk->rpa, rpa);
3180                         return irk;
3181                 }
3182         }
3183
3184         return NULL;
3185 }
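
/*
 * Background note (RPA layout per the Core specification, summarized
 * here): a Resolvable Private Address consists of a 24-bit prand and a
 * 24-bit hash, where hash = ah(IRK, prand). Resolving an RPA therefore
 * means recomputing the hash with every known IRK, which is what
 * smp_irk_matches() does in the second loop above; the first loop is
 * just a shortcut for RPAs that have been resolved before and were
 * cached in irk->rpa.
 */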
3186
3187 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3188                                      u8 addr_type)
3189 {
3190         struct smp_irk *irk;
3191
3192         /* Identity Address must be public or static random */
3193         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3194                 return NULL;
3195
3196         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3197                 if (addr_type == irk->addr_type &&
3198                     bacmp(bdaddr, &irk->bdaddr) == 0)
3199                         return irk;
3200         }
3201
3202         return NULL;
3203 }
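
/*
 * Note on the (bdaddr->b[5] & 0xc0) != 0xc0 check above: bdaddr_t is
 * stored little-endian, so b[5] holds the most significant byte of the
 * address. A static random address must have its two most significant
 * bits set to 1, e.g. 0xC3:xx:xx:xx:xx:xx qualifies while
 * 0x43:xx:xx:xx:xx:xx does not.
 */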
3204
3205 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3206                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3207                                   u8 pin_len, bool *persistent)
3208 {
3209         struct link_key *key, *old_key;
3210         u8 old_key_type;
3211
3212         old_key = hci_find_link_key(hdev, bdaddr);
3213         if (old_key) {
3214                 old_key_type = old_key->type;
3215                 key = old_key;
3216         } else {
3217                 old_key_type = conn ? conn->key_type : 0xff;
3218                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3219                 if (!key)
3220                         return NULL;
3221                 list_add(&key->list, &hdev->link_keys);
3222         }
3223
3224         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3225
3226         /* Some buggy controller combinations generate a changed
3227          * combination key for legacy pairing even when there's no
3228          * previous key */
3229         if (type == HCI_LK_CHANGED_COMBINATION &&
3230             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3231                 type = HCI_LK_COMBINATION;
3232                 if (conn)
3233                         conn->key_type = type;
3234         }
3235
3236         bacpy(&key->bdaddr, bdaddr);
3237         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3238         key->pin_len = pin_len;
3239
3240         if (type == HCI_LK_CHANGED_COMBINATION)
3241                 key->type = old_key_type;
3242         else
3243                 key->type = type;
3244
3245         if (persistent)
3246                 *persistent = hci_persistent_key(hdev, conn, type,
3247                                                  old_key_type);
3248
3249         return key;
3250 }
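
/* A minimal usage sketch (an assumption, not part of the original
 * file): store a freshly received link key and only act on it when
 * hci_persistent_key() decided it should outlive the connection.
 * example_notify() is a hypothetical helper.
 */
#if 0
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr,
				   u8 *val, u8 type, u8 pin_len)
{
	bool persistent;

	if (!hci_add_link_key(hdev, conn, bdaddr, val, type, pin_len,
			      &persistent))
		return;

	if (persistent)
		example_notify(hdev, bdaddr); /* hypothetical */
}
#endif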
3251
3252 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3253                             u8 addr_type, u8 type, u8 authenticated,
3254                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3255 {
3256         struct smp_ltk *key, *old_key;
3257         u8 role = ltk_role(type);
3258
3259         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3260         if (old_key)
3261                 key = old_key;
3262         else {
3263                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3264                 if (!key)
3265                         return NULL;
3266                 list_add(&key->list, &hdev->long_term_keys);
3267         }
3268
3269         bacpy(&key->bdaddr, bdaddr);
3270         key->bdaddr_type = addr_type;
3271         memcpy(key->val, tk, sizeof(key->val));
3272         key->authenticated = authenticated;
3273         key->ediv = ediv;
3274         key->rand = rand;
3275         key->enc_size = enc_size;
3276         key->type = type;
3277
3278         return key;
3279 }
3280
3281 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3282                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3283 {
3284         struct smp_irk *irk;
3285
3286         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3287         if (!irk) {
3288                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3289                 if (!irk)
3290                         return NULL;
3291
3292                 bacpy(&irk->bdaddr, bdaddr);
3293                 irk->addr_type = addr_type;
3294
3295                 list_add(&irk->list, &hdev->identity_resolving_keys);
3296         }
3297
3298         memcpy(irk->val, val, 16);
3299         bacpy(&irk->rpa, rpa);
3300
3301         return irk;
3302 }
3303
3304 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3305 {
3306         struct link_key *key;
3307
3308         key = hci_find_link_key(hdev, bdaddr);
3309         if (!key)
3310                 return -ENOENT;
3311
3312         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3313
3314         list_del(&key->list);
3315         kfree(key);
3316
3317         return 0;
3318 }
3319
3320 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3321 {
3322         struct smp_ltk *k, *tmp;
3323         int removed = 0;
3324
3325         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3326                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3327                         continue;
3328
3329                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3330
3331                 list_del(&k->list);
3332                 kfree(k);
3333                 removed++;
3334         }
3335
3336         return removed ? 0 : -ENOENT;
3337 }
3338
3339 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3340 {
3341         struct smp_irk *k, *tmp;
3342
3343         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3344                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3345                         continue;
3346
3347                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3348
3349                 list_del(&k->list);
3350                 kfree(k);
3351         }
3352 }
3353
3354 /* HCI command timer function */
3355 static void hci_cmd_timeout(struct work_struct *work)
3356 {
3357         struct hci_dev *hdev = container_of(work, struct hci_dev,
3358                                             cmd_timer.work);
3359
3360         if (hdev->sent_cmd) {
3361                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3362                 u16 opcode = __le16_to_cpu(sent->opcode);
3363
3364                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3365         } else {
3366                 BT_ERR("%s command tx timeout", hdev->name);
3367         }
3368
3369         atomic_set(&hdev->cmd_cnt, 1);
3370         queue_work(hdev->workqueue, &hdev->cmd_work);
3371 }
3372
3373 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3374                                           bdaddr_t *bdaddr)
3375 {
3376         struct oob_data *data;
3377
3378         list_for_each_entry(data, &hdev->remote_oob_data, list)
3379                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3380                         return data;
3381
3382         return NULL;
3383 }
3384
3385 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3386 {
3387         struct oob_data *data;
3388
3389         data = hci_find_remote_oob_data(hdev, bdaddr);
3390         if (!data)
3391                 return -ENOENT;
3392
3393         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3394
3395         list_del(&data->list);
3396         kfree(data);
3397
3398         return 0;
3399 }
3400
3401 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3402 {
3403         struct oob_data *data, *n;
3404
3405         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3406                 list_del(&data->list);
3407                 kfree(data);
3408         }
3409 }
3410
3411 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3412                             u8 *hash, u8 *randomizer)
3413 {
3414         struct oob_data *data;
3415
3416         data = hci_find_remote_oob_data(hdev, bdaddr);
3417         if (!data) {
3418                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3419                 if (!data)
3420                         return -ENOMEM;
3421
3422                 bacpy(&data->bdaddr, bdaddr);
3423                 list_add(&data->list, &hdev->remote_oob_data);
3424         }
3425
3426         memcpy(data->hash192, hash, sizeof(data->hash192));
3427         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3428
3429         memset(data->hash256, 0, sizeof(data->hash256));
3430         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3431
3432         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3433
3434         return 0;
3435 }
3436
3437 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3438                                 u8 *hash192, u8 *randomizer192,
3439                                 u8 *hash256, u8 *randomizer256)
3440 {
3441         struct oob_data *data;
3442
3443         data = hci_find_remote_oob_data(hdev, bdaddr);
3444         if (!data) {
3445                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3446                 if (!data)
3447                         return -ENOMEM;
3448
3449                 bacpy(&data->bdaddr, bdaddr);
3450                 list_add(&data->list, &hdev->remote_oob_data);
3451         }
3452
3453         memcpy(data->hash192, hash192, sizeof(data->hash192));
3454         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3455
3456         memcpy(data->hash256, hash256, sizeof(data->hash256));
3457         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3458
3459         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3460
3461         return 0;
3462 }
3463
3464 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3465                                          bdaddr_t *bdaddr, u8 type)
3466 {
3467         struct bdaddr_list *b;
3468
3469         list_for_each_entry(b, bdaddr_list, list) {
3470                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3471                         return b;
3472         }
3473
3474         return NULL;
3475 }
3476
3477 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3478 {
3479         struct list_head *p, *n;
3480
3481         list_for_each_safe(p, n, bdaddr_list) {
3482                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3483
3484                 list_del(p);
3485                 kfree(b);
3486         }
3487 }
3488
3489 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3490 {
3491         struct bdaddr_list *entry;
3492
3493         if (!bacmp(bdaddr, BDADDR_ANY))
3494                 return -EBADF;
3495
3496         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3497                 return -EEXIST;
3498
3499         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3500         if (!entry)
3501                 return -ENOMEM;
3502
3503         bacpy(&entry->bdaddr, bdaddr);
3504         entry->bdaddr_type = type;
3505
3506         list_add(&entry->list, list);
3507
3508         return 0;
3509 }
3510
3511 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3512 {
3513         struct bdaddr_list *entry;
3514
3515         if (!bacmp(bdaddr, BDADDR_ANY)) {
3516                 hci_bdaddr_list_clear(list);
3517                 return 0;
3518         }
3519
3520         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3521         if (!entry)
3522                 return -ENOENT;
3523
3524         list_del(&entry->list);
3525         kfree(entry);
3526
3527         return 0;
3528 }
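
/* A minimal sketch (an assumption, not part of the original file):
 * typical use of the bdaddr list helpers. Note that passing BDADDR_ANY
 * to hci_bdaddr_list_del() clears the entire list.
 */
#if 0
static int example_whitelist_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	err = hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
	if (err == -EEXIST)
		err = 0;	/* already white listed */

	return err;
}
#endif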
3529
3530 /* This function requires the caller holds hdev->lock */
3531 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3532                                                bdaddr_t *addr, u8 addr_type)
3533 {
3534         struct hci_conn_params *params;
3535
3536         /* The conn params list only contains identity addresses */
3537         if (!hci_is_identity_address(addr, addr_type))
3538                 return NULL;
3539
3540         list_for_each_entry(params, &hdev->le_conn_params, list) {
3541                 if (bacmp(&params->addr, addr) == 0 &&
3542                     params->addr_type == addr_type) {
3543                         return params;
3544                 }
3545         }
3546
3547         return NULL;
3548 }
3549
3550 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3551 {
3552         struct hci_conn *conn;
3553
3554         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3555         if (!conn)
3556                 return false;
3557
3558         if (conn->dst_type != type)
3559                 return false;
3560
3561         if (conn->state != BT_CONNECTED)
3562                 return false;
3563
3564         return true;
3565 }
3566
3567 /* This function requires the caller holds hdev->lock */
3568 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3569                                                   bdaddr_t *addr, u8 addr_type)
3570 {
3571         struct hci_conn_params *param;
3572
3573         /* The list only contains identity addresses */
3574         if (!hci_is_identity_address(addr, addr_type))
3575                 return NULL;
3576
3577         list_for_each_entry(param, list, action) {
3578                 if (bacmp(&param->addr, addr) == 0 &&
3579                     param->addr_type == addr_type)
3580                         return param;
3581         }
3582
3583         return NULL;
3584 }
3585
3586 /* This function requires the caller holds hdev->lock */
3587 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3588                                             bdaddr_t *addr, u8 addr_type)
3589 {
3590         struct hci_conn_params *params;
3591
3592         if (!hci_is_identity_address(addr, addr_type))
3593                 return NULL;
3594
3595         params = hci_conn_params_lookup(hdev, addr, addr_type);
3596         if (params)
3597                 return params;
3598
3599         params = kzalloc(sizeof(*params), GFP_KERNEL);
3600         if (!params) {
3601                 BT_ERR("Out of memory");
3602                 return NULL;
3603         }
3604
3605         bacpy(&params->addr, addr);
3606         params->addr_type = addr_type;
3607
3608         list_add(&params->list, &hdev->le_conn_params);
3609         INIT_LIST_HEAD(&params->action);
3610
3611         params->conn_min_interval = hdev->le_conn_min_interval;
3612         params->conn_max_interval = hdev->le_conn_max_interval;
3613         params->conn_latency = hdev->le_conn_latency;
3614         params->supervision_timeout = hdev->le_supv_timeout;
3615         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3616
3617         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3618
3619         return params;
3620 }
3621
3622 /* This function requires the caller holds hdev->lock */
3623 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3624                         u8 auto_connect)
3625 {
3626         struct hci_conn_params *params;
3627
3628         params = hci_conn_params_add(hdev, addr, addr_type);
3629         if (!params)
3630                 return -EIO;
3631
3632         if (params->auto_connect == auto_connect)
3633                 return 0;
3634
3635         list_del_init(&params->action);
3636
3637         switch (auto_connect) {
3638         case HCI_AUTO_CONN_DISABLED:
3639         case HCI_AUTO_CONN_LINK_LOSS:
3640                 hci_update_background_scan(hdev);
3641                 break;
3642         case HCI_AUTO_CONN_REPORT:
3643                 list_add(&params->action, &hdev->pend_le_reports);
3644                 hci_update_background_scan(hdev);
3645                 break;
3646         case HCI_AUTO_CONN_ALWAYS:
3647                 if (!is_connected(hdev, addr, addr_type)) {
3648                         list_add(&params->action, &hdev->pend_le_conns);
3649                         hci_update_background_scan(hdev);
3650                 }
3651                 break;
3652         }
3653
3654         params->auto_connect = auto_connect;
3655
3656         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3657                auto_connect);
3658
3659         return 0;
3660 }
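
/* A minimal sketch (an assumption, not part of the original file):
 * mark an LE device for background auto-connection. hdev->lock must
 * be held around the call, as required above.
 */
#if 0
static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}
#endif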
3661
3662 /* This function requires the caller holds hdev->lock */
3663 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3664 {
3665         struct hci_conn_params *params;
3666
3667         params = hci_conn_params_lookup(hdev, addr, addr_type);
3668         if (!params)
3669                 return;
3670
3671         list_del(&params->action);
3672         list_del(&params->list);
3673         kfree(params);
3674
3675         hci_update_background_scan(hdev);
3676
3677         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3678 }
3679
3680 /* This function requires the caller holds hdev->lock */
3681 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3682 {
3683         struct hci_conn_params *params, *tmp;
3684
3685         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3686                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3687                         continue;
3688                 list_del(&params->list);
3689                 kfree(params);
3690         }
3691
3692         BT_DBG("All LE disabled connection parameters were removed");
3693 }
3694
3695 /* This function requires the caller holds hdev->lock */
3696 void hci_conn_params_clear_all(struct hci_dev *hdev)
3697 {
3698         struct hci_conn_params *params, *tmp;
3699
3700         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3701                 list_del(&params->action);
3702                 list_del(&params->list);
3703                 kfree(params);
3704         }
3705
3706         hci_update_background_scan(hdev);
3707
3708         BT_DBG("All LE connection parameters were removed");
3709 }
3710
3711 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3712 {
3713         if (status) {
3714                 BT_ERR("Failed to start inquiry: status %d", status);
3715
3716                 hci_dev_lock(hdev);
3717                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3718                 hci_dev_unlock(hdev);
3719                 return;
3720         }
3721 }
3722
3723 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3724 {
3725         /* General inquiry access code (GIAC) */
3726         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3727         struct hci_request req;
3728         struct hci_cp_inquiry cp;
3729         int err;
3730
3731         if (status) {
3732                 BT_ERR("Failed to disable LE scanning: status %d", status);
3733                 return;
3734         }
3735
3736         switch (hdev->discovery.type) {
3737         case DISCOV_TYPE_LE:
3738                 hci_dev_lock(hdev);
3739                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3740                 hci_dev_unlock(hdev);
3741                 break;
3742
3743         case DISCOV_TYPE_INTERLEAVED:
3744                 hci_req_init(&req, hdev);
3745
3746                 memset(&cp, 0, sizeof(cp));
3747                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3748                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3749                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3750
3751                 hci_dev_lock(hdev);
3752
3753                 hci_inquiry_cache_flush(hdev);
3754
3755                 err = hci_req_run(&req, inquiry_complete);
3756                 if (err) {
3757                         BT_ERR("Inquiry request failed: err %d", err);
3758                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3759                 }
3760
3761                 hci_dev_unlock(hdev);
3762                 break;
3763         }
3764 }
3765
3766 static void le_scan_disable_work(struct work_struct *work)
3767 {
3768         struct hci_dev *hdev = container_of(work, struct hci_dev,
3769                                             le_scan_disable.work);
3770         struct hci_request req;
3771         int err;
3772
3773         BT_DBG("%s", hdev->name);
3774
3775         hci_req_init(&req, hdev);
3776
3777         hci_req_add_le_scan_disable(&req);
3778
3779         err = hci_req_run(&req, le_scan_disable_work_complete);
3780         if (err)
3781                 BT_ERR("Disable LE scanning request failed: err %d", err);
3782 }
3783
3784 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3785 {
3786         struct hci_dev *hdev = req->hdev;
3787
3788         /* If we're advertising or initiating an LE connection we can't
3789          * go ahead and change the random address at this time. This is
3790          * because the eventual initiator address used for the
3791          * subsequently created connection will be undefined (some
3792          * controllers use the new address and others the one we had
3793          * when the operation started).
3794          *
3795          * In this kind of scenario skip the update and let the random
3796          * address be updated at the next cycle.
3797          */
3798         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3799             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3800                 BT_DBG("Deferring random address update");
3801                 return;
3802         }
3803
3804         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3805 }
3806
3807 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3808                               u8 *own_addr_type)
3809 {
3810         struct hci_dev *hdev = req->hdev;
3811         int err;
3812
3813         /* If privacy is enabled, use a resolvable private address. If
3814          * the current RPA has expired, or an address other than the
3815          * current RPA is in use, then generate a new one.
3816          */
3817         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3818                 int to;
3819
3820                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3821
3822                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3823                     !bacmp(&hdev->random_addr, &hdev->rpa))
3824                         return 0;
3825
3826                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3827                 if (err < 0) {
3828                         BT_ERR("%s failed to generate new RPA", hdev->name);
3829                         return err;
3830                 }
3831
3832                 set_random_addr(req, &hdev->rpa);
3833
3834                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3835                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3836
3837                 return 0;
3838         }
3839
3840         /* If privacy is required but no resolvable private address is
3841          * available, use a non-resolvable private address. This is
3842          * useful for active scanning and non-connectable advertising.
3843          */
3844         if (require_privacy) {
3845                 bdaddr_t urpa;
3846
3847                 get_random_bytes(&urpa, 6);
3848                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
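                /* The mask above yields a non-resolvable private
                 * address: its two most significant bits must be 00,
                 * and b[5] is the MSB since bdaddr_t is stored
                 * little-endian.
                 */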
3849
3850                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3851                 set_random_addr(req, &urpa);
3852                 return 0;
3853         }
3854
3855         /* If the static address is being forced, or there is no
3856          * public address, use the static address as the random
3857          * address (but skip the HCI command if the current random
3858          * address is already the static one).
3859          */
3860         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3861             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3862                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3863                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3864                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3865                                     &hdev->static_addr);
3866                 return 0;
3867         }
3868
3869         /* Neither privacy nor static address is being used so use a
3870          * public address.
3871          */
3872         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3873
3874         return 0;
3875 }
3876
3877 /* Copy the Identity Address of the controller.
3878  *
3879  * If the controller has a public BD_ADDR, then by default use that one.
3880  * If this is an LE-only controller without a public address, default to
3881  * the static random address.
3882  *
3883  * For debugging purposes it is possible to force controllers with a
3884  * public address to use the static random address instead.
3885  */
3886 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3887                                u8 *bdaddr_type)
3888 {
3889         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3890             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3891                 bacpy(bdaddr, &hdev->static_addr);
3892                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3893         } else {
3894                 bacpy(bdaddr, &hdev->bdaddr);
3895                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3896         }
3897 }
3898
3899 /* Alloc HCI device */
3900 struct hci_dev *hci_alloc_dev(void)
3901 {
3902         struct hci_dev *hdev;
3903
3904         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3905         if (!hdev)
3906                 return NULL;
3907
3908         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3909         hdev->esco_type = (ESCO_HV1);
3910         hdev->link_mode = (HCI_LM_ACCEPT);
3911         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3912         hdev->io_capability = 0x03;     /* No Input No Output */
3913         hdev->manufacturer = 0xffff;    /* Default to internal use */
3914         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3915         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3916
3917         hdev->sniff_max_interval = 800;
3918         hdev->sniff_min_interval = 80;
3919
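        /* Defaults below are in controller units: advertising on all
         * three channels (map 0x07), scan interval 0x0060 * 0.625 ms =
         * 60 ms, scan window 0x0030 * 0.625 ms = 30 ms, connection
         * interval 0x0028-0x0038 * 1.25 ms = 50-70 ms and supervision
         * timeout 0x002a * 10 ms = 420 ms.
         */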
3920         hdev->le_adv_channel_map = 0x07;
3921         hdev->le_scan_interval = 0x0060;
3922         hdev->le_scan_window = 0x0030;
3923         hdev->le_conn_min_interval = 0x0028;
3924         hdev->le_conn_max_interval = 0x0038;
3925         hdev->le_conn_latency = 0x0000;
3926         hdev->le_supv_timeout = 0x002a;
3927
3928         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3929         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3930         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3931         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3932
3933         mutex_init(&hdev->lock);
3934         mutex_init(&hdev->req_lock);
3935
3936         INIT_LIST_HEAD(&hdev->mgmt_pending);
3937         INIT_LIST_HEAD(&hdev->blacklist);
3938         INIT_LIST_HEAD(&hdev->whitelist);
3939         INIT_LIST_HEAD(&hdev->uuids);
3940         INIT_LIST_HEAD(&hdev->link_keys);
3941         INIT_LIST_HEAD(&hdev->long_term_keys);
3942         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3943         INIT_LIST_HEAD(&hdev->remote_oob_data);
3944         INIT_LIST_HEAD(&hdev->le_white_list);
3945         INIT_LIST_HEAD(&hdev->le_conn_params);
3946         INIT_LIST_HEAD(&hdev->pend_le_conns);
3947         INIT_LIST_HEAD(&hdev->pend_le_reports);
3948         INIT_LIST_HEAD(&hdev->conn_hash.list);
3949
3950         INIT_WORK(&hdev->rx_work, hci_rx_work);
3951         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3952         INIT_WORK(&hdev->tx_work, hci_tx_work);
3953         INIT_WORK(&hdev->power_on, hci_power_on);
3954
3955         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3956         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3957         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3958
3959         skb_queue_head_init(&hdev->rx_q);
3960         skb_queue_head_init(&hdev->cmd_q);
3961         skb_queue_head_init(&hdev->raw_q);
3962
3963         init_waitqueue_head(&hdev->req_wait_q);
3964
3965         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3966
3967         hci_init_sysfs(hdev);
3968         discovery_init(hdev);
3969
3970         return hdev;
3971 }
3972 EXPORT_SYMBOL(hci_alloc_dev);
3973
3974 /* Free HCI device */
3975 void hci_free_dev(struct hci_dev *hdev)
3976 {
3977         /* will free via device release */
3978         put_device(&hdev->dev);
3979 }
3980 EXPORT_SYMBOL(hci_free_dev);
3981
3982 /* Register HCI device */
3983 int hci_register_dev(struct hci_dev *hdev)
3984 {
3985         int id, error;
3986
3987         if (!hdev->open || !hdev->close || !hdev->send)
3988                 return -EINVAL;
3989
3990         /* Do not allow HCI_AMP devices to register at index 0,
3991          * so the index can be used as the AMP controller ID.
3992          */
3993         switch (hdev->dev_type) {
3994         case HCI_BREDR:
3995                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3996                 break;
3997         case HCI_AMP:
3998                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3999                 break;
4000         default:
4001                 return -EINVAL;
4002         }
4003
4004         if (id < 0)
4005                 return id;
4006
4007         sprintf(hdev->name, "hci%d", id);
4008         hdev->id = id;
4009
4010         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4011
4012         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4013                                           WQ_MEM_RECLAIM, 1, hdev->name);
4014         if (!hdev->workqueue) {
4015                 error = -ENOMEM;
4016                 goto err;
4017         }
4018
4019         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4020                                               WQ_MEM_RECLAIM, 1, hdev->name);
4021         if (!hdev->req_workqueue) {
4022                 destroy_workqueue(hdev->workqueue);
4023                 error = -ENOMEM;
4024                 goto err;
4025         }
4026
4027         if (!IS_ERR_OR_NULL(bt_debugfs))
4028                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4029
4030         dev_set_name(&hdev->dev, "%s", hdev->name);
4031
4032         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4033                                                CRYPTO_ALG_ASYNC);
4034         if (IS_ERR(hdev->tfm_aes)) {
4035                 BT_ERR("Unable to create crypto context");
4036                 error = PTR_ERR(hdev->tfm_aes);
4037                 hdev->tfm_aes = NULL;
4038                 goto err_wqueue;
4039         }
4040
4041         error = device_add(&hdev->dev);
4042         if (error < 0)
4043                 goto err_tfm;
4044
4045         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4046                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4047                                     hdev);
4048         if (hdev->rfkill) {
4049                 if (rfkill_register(hdev->rfkill) < 0) {
4050                         rfkill_destroy(hdev->rfkill);
4051                         hdev->rfkill = NULL;
4052                 }
4053         }
4054
4055         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4056                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4057
4058         set_bit(HCI_SETUP, &hdev->dev_flags);
4059         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4060
4061         if (hdev->dev_type == HCI_BREDR) {
4062                 /* Assume BR/EDR support until proven otherwise (such as
4063                  * through reading supported features during init).
4064                  */
4065                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4066         }
4067
4068         write_lock(&hci_dev_list_lock);
4069         list_add(&hdev->list, &hci_dev_list);
4070         write_unlock(&hci_dev_list_lock);
4071
4072         /* Devices that are marked for raw-only usage are unconfigured
4073          * and should not be included in normal operation.
4074          */
4075         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4076                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4077
4078         hci_notify(hdev, HCI_DEV_REG);
4079         hci_dev_hold(hdev);
4080
4081         queue_work(hdev->req_workqueue, &hdev->power_on);
4082
4083         return id;
4084
4085 err_tfm:
4086         crypto_free_blkcipher(hdev->tfm_aes);
4087 err_wqueue:
4088         destroy_workqueue(hdev->workqueue);
4089         destroy_workqueue(hdev->req_workqueue);
4090 err:
4091         ida_simple_remove(&hci_index_ida, hdev->id);
4092
4093         return error;
4094 }
4095 EXPORT_SYMBOL(hci_register_dev);
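
/* Illustrative driver-side sketch (an assumption, not part of this
 * file): the minimal registration sequence a transport driver goes
 * through. example_open/example_close/example_send are hypothetical
 * callbacks with the signatures required above.
 */
#if 0
static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_UART;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif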
4096
4097 /* Unregister HCI device */
4098 void hci_unregister_dev(struct hci_dev *hdev)
4099 {
4100         int i, id;
4101
4102         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4103
4104         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4105
4106         id = hdev->id;
4107
4108         write_lock(&hci_dev_list_lock);
4109         list_del(&hdev->list);
4110         write_unlock(&hci_dev_list_lock);
4111
4112         hci_dev_do_close(hdev);
4113
4114         for (i = 0; i < NUM_REASSEMBLY; i++)
4115                 kfree_skb(hdev->reassembly[i]);
4116
4117         cancel_work_sync(&hdev->power_on);
4118
4119         if (!test_bit(HCI_INIT, &hdev->flags) &&
4120             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4121             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4122                 hci_dev_lock(hdev);
4123                 mgmt_index_removed(hdev);
4124                 hci_dev_unlock(hdev);
4125         }
4126
4127         /* mgmt_index_removed should take care of emptying the
4128          * pending list */
4129         BUG_ON(!list_empty(&hdev->mgmt_pending));
4130
4131         hci_notify(hdev, HCI_DEV_UNREG);
4132
4133         if (hdev->rfkill) {
4134                 rfkill_unregister(hdev->rfkill);
4135                 rfkill_destroy(hdev->rfkill);
4136         }
4137
4138         if (hdev->tfm_aes)
4139                 crypto_free_blkcipher(hdev->tfm_aes);
4140
4141         device_del(&hdev->dev);
4142
4143         debugfs_remove_recursive(hdev->debugfs);
4144
4145         destroy_workqueue(hdev->workqueue);
4146         destroy_workqueue(hdev->req_workqueue);
4147
4148         hci_dev_lock(hdev);
4149         hci_bdaddr_list_clear(&hdev->blacklist);
4150         hci_bdaddr_list_clear(&hdev->whitelist);
4151         hci_uuids_clear(hdev);
4152         hci_link_keys_clear(hdev);
4153         hci_smp_ltks_clear(hdev);
4154         hci_smp_irks_clear(hdev);
4155         hci_remote_oob_data_clear(hdev);
4156         hci_bdaddr_list_clear(&hdev->le_white_list);
4157         hci_conn_params_clear_all(hdev);
4158         hci_dev_unlock(hdev);
4159
4160         hci_dev_put(hdev);
4161
4162         ida_simple_remove(&hci_index_ida, id);
4163 }
4164 EXPORT_SYMBOL(hci_unregister_dev);
4165
4166 /* Suspend HCI device */
4167 int hci_suspend_dev(struct hci_dev *hdev)
4168 {
4169         hci_notify(hdev, HCI_DEV_SUSPEND);
4170         return 0;
4171 }
4172 EXPORT_SYMBOL(hci_suspend_dev);
4173
4174 /* Resume HCI device */
4175 int hci_resume_dev(struct hci_dev *hdev)
4176 {
4177         hci_notify(hdev, HCI_DEV_RESUME);
4178         return 0;
4179 }
4180 EXPORT_SYMBOL(hci_resume_dev);
4181
4182 /* Receive frame from HCI drivers */
4183 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4184 {
4185         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4186                       !test_bit(HCI_INIT, &hdev->flags))) {
4187                 kfree_skb(skb);
4188                 return -ENXIO;
4189         }
4190
4191         /* Incoming skb */
4192         bt_cb(skb)->incoming = 1;
4193
4194         /* Time stamp */
4195         __net_timestamp(skb);
4196
4197         skb_queue_tail(&hdev->rx_q, skb);
4198         queue_work(hdev->workqueue, &hdev->rx_work);
4199
4200         return 0;
4201 }
4202 EXPORT_SYMBOL(hci_recv_frame);
4203
4204 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4205                           int count, __u8 index)
4206 {
4207         int len = 0;
4208         int hlen = 0;
4209         int remain = count;
4210         struct sk_buff *skb;
4211         struct bt_skb_cb *scb;
4212
4213         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4214             index >= NUM_REASSEMBLY)
4215                 return -EILSEQ;
4216
4217         skb = hdev->reassembly[index];
4218
4219         if (!skb) {
4220                 switch (type) {
4221                 case HCI_ACLDATA_PKT:
4222                         len = HCI_MAX_FRAME_SIZE;
4223                         hlen = HCI_ACL_HDR_SIZE;
4224                         break;
4225                 case HCI_EVENT_PKT:
4226                         len = HCI_MAX_EVENT_SIZE;
4227                         hlen = HCI_EVENT_HDR_SIZE;
4228                         break;
4229                 case HCI_SCODATA_PKT:
4230                         len = HCI_MAX_SCO_SIZE;
4231                         hlen = HCI_SCO_HDR_SIZE;
4232                         break;
4233                 }
4234
4235                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4236                 if (!skb)
4237                         return -ENOMEM;
4238
4239                 scb = (void *) skb->cb;
4240                 scb->expect = hlen;
4241                 scb->pkt_type = type;
4242
4243                 hdev->reassembly[index] = skb;
4244         }
4245
4246         while (count) {
4247                 scb = (void *) skb->cb;
4248                 len = min_t(uint, scb->expect, count);
4249
4250                 memcpy(skb_put(skb, len), data, len);
4251
4252                 count -= len;
4253                 data += len;
4254                 scb->expect -= len;
4255                 remain = count;
4256
4257                 switch (type) {
4258                 case HCI_EVENT_PKT:
4259                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4260                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4261                                 scb->expect = h->plen;
4262
4263                                 if (skb_tailroom(skb) < scb->expect) {
4264                                         kfree_skb(skb);
4265                                         hdev->reassembly[index] = NULL;
4266                                         return -ENOMEM;
4267                                 }
4268                         }
4269                         break;
4270
4271                 case HCI_ACLDATA_PKT:
4272                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4273                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4274                                 scb->expect = __le16_to_cpu(h->dlen);
4275
4276                                 if (skb_tailroom(skb) < scb->expect) {
4277                                         kfree_skb(skb);
4278                                         hdev->reassembly[index] = NULL;
4279                                         return -ENOMEM;
4280                                 }
4281                         }
4282                         break;
4283
4284                 case HCI_SCODATA_PKT:
4285                         if (skb->len == HCI_SCO_HDR_SIZE) {
4286                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4287                                 scb->expect = h->dlen;
4288
4289                                 if (skb_tailroom(skb) < scb->expect) {
4290                                         kfree_skb(skb);
4291                                         hdev->reassembly[index] = NULL;
4292                                         return -ENOMEM;
4293                                 }
4294                         }
4295                         break;
4296                 }
4297
4298                 if (scb->expect == 0) {
4299                         /* Complete frame */
4300
4301                         bt_cb(skb)->pkt_type = type;
4302                         hci_recv_frame(hdev, skb);
4303
4304                         hdev->reassembly[index] = NULL;
4305                         return remain;
4306                 }
4307         }
4308
4309         return remain;
4310 }
4311
4312 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4313 {
4314         int rem = 0;
4315
4316         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4317                 return -EILSEQ;
4318
4319         while (count) {
4320                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4321                 if (rem < 0)
4322                         return rem;
4323
4324                 data += (count - rem);
4325                 count = rem;
4326         }
4327
4328         return rem;
4329 }
4330 EXPORT_SYMBOL(hci_recv_fragment);
4331
4332 #define STREAM_REASSEMBLY 0
4333
4334 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4335 {
4336         int type;
4337         int rem = 0;
4338
4339         while (count) {
4340                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4341
4342                 if (!skb) {
4343                         struct { char type; } *pkt;
4344
4345                         /* Start of the frame */
4346                         pkt = data;
4347                         type = pkt->type;
4348
4349                         data++;
4350                         count--;
4351                 } else
4352                         type = bt_cb(skb)->pkt_type;
4353
4354                 rem = hci_reassembly(hdev, type, data, count,
4355                                      STREAM_REASSEMBLY);
4356                 if (rem < 0)
4357                         return rem;
4358
4359                 data += (count - rem);
4360                 count = rem;
4361         }
4362
4363         return rem;
4364 }
4365 EXPORT_SYMBOL(hci_recv_stream_fragment);
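
/* Illustrative sketch (an assumption, not part of the original file):
 * a UART driver pushing raw H4 framed bytes, where each frame starts
 * with a one byte packet type, straight into the stream reassembler.
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, (void *)buf, len) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}
#endif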
4366
4367 /* ---- Interface to upper protocols ---- */
4368
4369 int hci_register_cb(struct hci_cb *cb)
4370 {
4371         BT_DBG("%p name %s", cb, cb->name);
4372
4373         write_lock(&hci_cb_list_lock);
4374         list_add(&cb->list, &hci_cb_list);
4375         write_unlock(&hci_cb_list_lock);
4376
4377         return 0;
4378 }
4379 EXPORT_SYMBOL(hci_register_cb);
4380
4381 int hci_unregister_cb(struct hci_cb *cb)
4382 {
4383         BT_DBG("%p name %s", cb, cb->name);
4384
4385         write_lock(&hci_cb_list_lock);
4386         list_del(&cb->list);
4387         write_unlock(&hci_cb_list_lock);
4388
4389         return 0;
4390 }
4391 EXPORT_SYMBOL(hci_unregister_cb);
4392
4393 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4394 {
4395         int err;
4396
4397         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4398
4399         /* Time stamp */
4400         __net_timestamp(skb);
4401
4402         /* Send copy to monitor */
4403         hci_send_to_monitor(hdev, skb);
4404
4405         if (atomic_read(&hdev->promisc)) {
4406                 /* Send copy to the sockets */
4407                 hci_send_to_sock(hdev, skb);
4408         }
4409
4410         /* Get rid of skb owner, prior to sending to the driver. */
4411         skb_orphan(skb);
4412
4413         err = hdev->send(hdev, skb);
4414         if (err < 0) {
4415                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4416                 kfree_skb(skb);
4417         }
4418 }
4419
4420 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4421 {
4422         skb_queue_head_init(&req->cmd_q);
4423         req->hdev = hdev;
4424         req->err = 0;
4425 }
4426
4427 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4428 {
4429         struct hci_dev *hdev = req->hdev;
4430         struct sk_buff *skb;
4431         unsigned long flags;
4432
4433         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4434
4435         /* If an error occurred during request building, remove all HCI
4436          * commands queued on the HCI request queue.
4437          */
4438         if (req->err) {
4439                 skb_queue_purge(&req->cmd_q);
4440                 return req->err;
4441         }
4442
4443         /* Do not allow empty requests */
4444         if (skb_queue_empty(&req->cmd_q))
4445                 return -ENODATA;
4446
4447         skb = skb_peek_tail(&req->cmd_q);
4448         bt_cb(skb)->req.complete = complete;
4449
4450         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4451         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4452         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4453
4454         queue_work(hdev->workqueue, &hdev->cmd_work);
4455
4456         return 0;
4457 }
4458
4459 bool hci_req_pending(struct hci_dev *hdev)
4460 {
4461         return (hdev->req_status == HCI_REQ_PEND);
4462 }
4463
4464 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4465                                        u32 plen, const void *param)
4466 {
4467         int len = HCI_COMMAND_HDR_SIZE + plen;
4468         struct hci_command_hdr *hdr;
4469         struct sk_buff *skb;
4470
4471         skb = bt_skb_alloc(len, GFP_ATOMIC);
4472         if (!skb)
4473                 return NULL;
4474
4475         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4476         hdr->opcode = cpu_to_le16(opcode);
4477         hdr->plen   = plen;
4478
4479         if (plen)
4480                 memcpy(skb_put(skb, plen), param, plen);
4481
4482         BT_DBG("skb len %d", skb->len);
4483
4484         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4485
4486         return skb;
4487 }
4488
4489 /* Send HCI command */
4490 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4491                  const void *param)
4492 {
4493         struct sk_buff *skb;
4494
4495         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4496
4497         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4498         if (!skb) {
4499                 BT_ERR("%s no memory for command", hdev->name);
4500                 return -ENOMEM;
4501         }
4502
4503         /* Stand-alone HCI commands must be flagged as
4504          * single-command requests.
4505          */
4506         bt_cb(skb)->req.start = true;
4507
4508         skb_queue_tail(&hdev->cmd_q, skb);
4509         queue_work(hdev->workqueue, &hdev->cmd_work);
4510
4511         return 0;
4512 }
4513
4514 /* Queue a command to an asynchronous HCI request */
4515 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4516                     const void *param, u8 event)
4517 {
4518         struct hci_dev *hdev = req->hdev;
4519         struct sk_buff *skb;
4520
4521         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4522
4523         /* If an error occurred during request building, there is no point in
4524          * queueing the HCI command. We can simply return.
4525          */
4526         if (req->err)
4527                 return;
4528
4529         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4530         if (!skb) {
4531                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4532                        hdev->name, opcode);
4533                 req->err = -ENOMEM;
4534                 return;
4535         }
4536
4537         if (skb_queue_empty(&req->cmd_q))
4538                 bt_cb(skb)->req.start = true;
4539
4540         bt_cb(skb)->req.event = event;
4541
4542         skb_queue_tail(&req->cmd_q, skb);
4543 }
4544
4545 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4546                  const void *param)
4547 {
4548         hci_req_add_ev(req, opcode, plen, param, 0);
4549 }
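
/* A minimal sketch (an assumption, not part of the original file):
 * build a one-command request and run it asynchronously; the
 * completion callback fires once the controller has answered the
 * last command.
 */
#if 0
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return hci_req_run(&req, example_complete);
}
#endif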
4550
4551 /* Get data from the previously sent command */
4552 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4553 {
4554         struct hci_command_hdr *hdr;
4555
4556         if (!hdev->sent_cmd)
4557                 return NULL;
4558
4559         hdr = (void *) hdev->sent_cmd->data;
4560
4561         if (hdr->opcode != cpu_to_le16(opcode))
4562                 return NULL;
4563
4564         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4565
4566         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4567 }
4568
4569 /* Send ACL data */
4570 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4571 {
4572         struct hci_acl_hdr *hdr;
4573         int len = skb->len;
4574
4575         skb_push(skb, HCI_ACL_HDR_SIZE);
4576         skb_reset_transport_header(skb);
4577         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
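        /* hci_handle_pack() packs the 12-bit connection handle into
         * the low bits and the 2-bit packet boundary and broadcast
         * flags into the top four bits of the 16-bit handle field.
         */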
4578         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4579         hdr->dlen   = cpu_to_le16(len);
4580 }
4581
4582 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4583                           struct sk_buff *skb, __u16 flags)
4584 {
4585         struct hci_conn *conn = chan->conn;
4586         struct hci_dev *hdev = conn->hdev;
4587         struct sk_buff *list;
4588
4589         skb->len = skb_headlen(skb);
4590         skb->data_len = 0;
4591
4592         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4593
4594         switch (hdev->dev_type) {
4595         case HCI_BREDR:
4596                 hci_add_acl_hdr(skb, conn->handle, flags);
4597                 break;
4598         case HCI_AMP:
4599                 hci_add_acl_hdr(skb, chan->handle, flags);
4600                 break;
4601         default:
4602                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4603                 return;
4604         }
4605
4606         list = skb_shinfo(skb)->frag_list;
4607         if (!list) {
4608                 /* Non-fragmented */
4609                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4610
4611                 skb_queue_tail(queue, skb);
4612         } else {
4613                 /* Fragmented */
4614                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4615
4616                 skb_shinfo(skb)->frag_list = NULL;
4617
4618                 /* Queue all fragments atomically */
4619                 spin_lock(&queue->lock);
4620
4621                 __skb_queue_tail(queue, skb);
4622
4623                 flags &= ~ACL_START;
4624                 flags |= ACL_CONT;
4625                 do {
4626                         skb = list; list = list->next;
4627
4628                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4629                         hci_add_acl_hdr(skb, conn->handle, flags);
4630
4631                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4632
4633                         __skb_queue_tail(queue, skb);
4634                 } while (list);
4635
4636                 spin_unlock(&queue->lock);
4637         }
4638 }
4639
4640 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4641 {
4642         struct hci_dev *hdev = chan->conn->hdev;
4643
4644         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4645
4646         hci_queue_acl(chan, &chan->data_q, skb, flags);
4647
4648         queue_work(hdev->workqueue, &hdev->tx_work);
4649 }
4650
4651 /* Send SCO data */
4652 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4653 {
4654         struct hci_dev *hdev = conn->hdev;
4655         struct hci_sco_hdr hdr;
4656
4657         BT_DBG("%s len %d", hdev->name, skb->len);
4658
4659         hdr.handle = cpu_to_le16(conn->handle);
4660         hdr.dlen   = skb->len;
4661
4662         skb_push(skb, HCI_SCO_HDR_SIZE);
4663         skb_reset_transport_header(skb);
4664         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4665
4666         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4667
4668         skb_queue_tail(&conn->data_q, skb);
4669         queue_work(hdev->workqueue, &hdev->tx_work);
4670 }
4671
4672 /* ---- HCI TX task (outgoing data) ---- */
4673
4674 /* HCI Connection scheduler */
4675 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4676                                      int *quote)
4677 {
4678         struct hci_conn_hash *h = &hdev->conn_hash;
4679         struct hci_conn *conn = NULL, *c;
4680         unsigned int num = 0, min = ~0;
4681
4682         /* We don't have to lock the device here. Connections are always
4683          * added and removed with the TX task disabled. */
4684
4685         rcu_read_lock();
4686
4687         list_for_each_entry_rcu(c, &h->list, list) {
4688                 if (c->type != type || skb_queue_empty(&c->data_q))
4689                         continue;
4690
4691                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4692                         continue;
4693
4694                 num++;
4695
4696                 if (c->sent < min) {
4697                         min  = c->sent;
4698                         conn = c;
4699                 }
4700
4701                 if (hci_conn_num(hdev, type) == num)
4702                         break;
4703         }
4704
4705         rcu_read_unlock();
4706
4707         if (conn) {
4708                 int cnt, q;
4709
4710                 switch (conn->type) {
4711                 case ACL_LINK:
4712                         cnt = hdev->acl_cnt;
4713                         break;
4714                 case SCO_LINK:
4715                 case ESCO_LINK:
4716                         cnt = hdev->sco_cnt;
4717                         break;
4718                 case LE_LINK:
4719                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4720                         break;
4721                 default:
4722                         cnt = 0;
4723                         BT_ERR("Unknown link type");
4724                 }
4725
4726                 q = cnt / num;
4727                 *quote = q ? q : 1;
4728         } else
4729                 *quote = 0;
4730
4731         BT_DBG("conn %p quote %d", conn, *quote);
4732         return conn;
4733 }
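
/* Worked example of the quota math above: with hdev->acl_cnt = 8 free
 * ACL buffers and num = 3 connections with queued data, the connection
 * with the fewest outstanding packets gets *quote = 8 / 3 = 2; when
 * cnt < num the quota is clamped to a minimum of 1.
 */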
4734
4735 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4736 {
4737         struct hci_conn_hash *h = &hdev->conn_hash;
4738         struct hci_conn *c;
4739
4740         BT_ERR("%s link tx timeout", hdev->name);
4741
4742         rcu_read_lock();
4743
4744         /* Kill stalled connections */
4745         list_for_each_entry_rcu(c, &h->list, list) {
4746                 if (c->type == type && c->sent) {
4747                         BT_ERR("%s killing stalled connection %pMR",
4748                                hdev->name, &c->dst);
4749                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4750                 }
4751         }
4752
4753         rcu_read_unlock();
4754 }
4755
4756 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4757                                       int *quote)
4758 {
4759         struct hci_conn_hash *h = &hdev->conn_hash;
4760         struct hci_chan *chan = NULL;
4761         unsigned int num = 0, min = ~0, cur_prio = 0;
4762         struct hci_conn *conn;
4763         int cnt, q, conn_num = 0;
4764
4765         BT_DBG("%s", hdev->name);
4766
4767         rcu_read_lock();
4768
4769         list_for_each_entry_rcu(conn, &h->list, list) {
4770                 struct hci_chan *tmp;
4771
4772                 if (conn->type != type)
4773                         continue;
4774
4775                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4776                         continue;
4777
4778                 conn_num++;
4779
4780                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4781                         struct sk_buff *skb;
4782
4783                         if (skb_queue_empty(&tmp->data_q))
4784                                 continue;
4785
4786                         skb = skb_peek(&tmp->data_q);
4787                         if (skb->priority < cur_prio)
4788                                 continue;
4789
4790                         if (skb->priority > cur_prio) {
4791                                 num = 0;
4792                                 min = ~0;
4793                                 cur_prio = skb->priority;
4794                         }
4795
4796                         num++;
4797
4798                         if (conn->sent < min) {
4799                                 min  = conn->sent;
4800                                 chan = tmp;
4801                         }
4802                 }
4803
4804                 if (hci_conn_num(hdev, type) == conn_num)
4805                         break;
4806         }
4807
4808         rcu_read_unlock();
4809
4810         if (!chan)
4811                 return NULL;
4812
4813         switch (chan->conn->type) {
4814         case ACL_LINK:
4815                 cnt = hdev->acl_cnt;
4816                 break;
4817         case AMP_LINK:
4818                 cnt = hdev->block_cnt;
4819                 break;
4820         case SCO_LINK:
4821         case ESCO_LINK:
4822                 cnt = hdev->sco_cnt;
4823                 break;
4824         case LE_LINK:
4825                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4826                 break;
4827         default:
4828                 cnt = 0;
4829                 BT_ERR("Unknown link type");
4830         }
4831
4832         q = cnt / num;
4833         *quote = q ? q : 1;
4834         BT_DBG("chan %p quote %d", chan, *quote);
4835         return chan;
4836 }
4837
4838 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4839 {
4840         struct hci_conn_hash *h = &hdev->conn_hash;
4841         struct hci_conn *conn;
4842         int num = 0;
4843
4844         BT_DBG("%s", hdev->name);
4845
4846         rcu_read_lock();
4847
4848         list_for_each_entry_rcu(conn, &h->list, list) {
4849                 struct hci_chan *chan;
4850
4851                 if (conn->type != type)
4852                         continue;
4853
4854                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4855                         continue;
4856
4857                 num++;
4858
4859                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4860                         struct sk_buff *skb;
4861
4862                         if (chan->sent) {
4863                                 chan->sent = 0;
4864                                 continue;
4865                         }
4866
4867                         if (skb_queue_empty(&chan->data_q))
4868                                 continue;
4869
4870                         skb = skb_peek(&chan->data_q);
4871                         if (skb->priority >= HCI_PRIO_MAX - 1)
4872                                 continue;
4873
4874                         skb->priority = HCI_PRIO_MAX - 1;
4875
4876                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4877                                skb->priority);
4878                 }
4879
4880                 if (hci_conn_num(hdev, type) == num)
4881                         break;
4882         }
4883
4884         rcu_read_unlock();
4885
4886 }
4887
4888 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4889 {
4890         /* Calculate count of blocks used by this packet */
4891         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4892 }
4893
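/* If the controller has returned no ACL credits for longer than
 * HCI_ACL_TX_TIMEOUT, assume it has stalled and let hci_link_tx_to()
 * tear down the affected connections. Skipped while the controller is
 * still unconfigured.
 */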
4894 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4895 {
4896         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4897                 /* ACL tx timeout must be longer than maximum
4898                  * link supervision timeout (40.9 seconds) */
4899                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4900                                        HCI_ACL_TX_TIMEOUT))
4901                         hci_link_tx_to(hdev, ACL_LINK);
4902         }
4903 }
4904
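/* ACL scheduling for packet-based flow control: drain the highest
 * priority channels first, spending one acl_cnt credit per frame and
 * stopping a channel's inner loop as soon as a lower priority frame
 * reaches the head of its queue.
 */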
4905 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4906 {
4907         unsigned int cnt = hdev->acl_cnt;
4908         struct hci_chan *chan;
4909         struct sk_buff *skb;
4910         int quote;
4911
4912         __check_timeout(hdev, cnt);
4913
4914         while (hdev->acl_cnt &&
4915                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4916                 u32 priority = (skb_peek(&chan->data_q))->priority;
4917                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4918                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4919                                skb->len, skb->priority);
4920
4921                         /* Stop if priority has changed */
4922                         if (skb->priority < priority)
4923                                 break;
4924
4925                         skb = skb_dequeue(&chan->data_q);
4926
4927                         hci_conn_enter_active_mode(chan->conn,
4928                                                    bt_cb(skb)->force_active);
4929
4930                         hci_send_frame(hdev, skb);
4931                         hdev->acl_last_tx = jiffies;
4932
4933                         hdev->acl_cnt--;
4934                         chan->sent++;
4935                         chan->conn->sent++;
4936                 }
4937         }
4938
4939         if (cnt != hdev->acl_cnt)
4940                 hci_prio_recalculate(hdev, ACL_LINK);
4941 }
4942
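/* ACL scheduling for block-based flow control: the same priority-driven
 * loop as hci_sched_acl_pkt(), except that quotes and credits are
 * accounted in controller buffer blocks (see __get_blocks()) rather
 * than whole packets, and AMP controllers schedule AMP_LINK traffic.
 */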
4943 static void hci_sched_acl_blk(struct hci_dev *hdev)
4944 {
4945         unsigned int cnt = hdev->block_cnt;
4946         struct hci_chan *chan;
4947         struct sk_buff *skb;
4948         int quote;
4949         u8 type;
4950
4951         __check_timeout(hdev, cnt);
4952
4953         BT_DBG("%s", hdev->name);
4954
4955         if (hdev->dev_type == HCI_AMP)
4956                 type = AMP_LINK;
4957         else
4958                 type = ACL_LINK;
4959
4960         while (hdev->block_cnt > 0 &&
4961                (chan = hci_chan_sent(hdev, type, &quote))) {
4962                 u32 priority = (skb_peek(&chan->data_q))->priority;
4963                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4964                         int blocks;
4965
4966                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4967                                skb->len, skb->priority);
4968
4969                         /* Stop if priority has changed */
4970                         if (skb->priority < priority)
4971                                 break;
4972
4973                         skb = skb_dequeue(&chan->data_q);
4974
4975                         blocks = __get_blocks(hdev, skb);
4976                         if (blocks > hdev->block_cnt)
4977                                 return;
4978
4979                         hci_conn_enter_active_mode(chan->conn,
4980                                                    bt_cb(skb)->force_active);
4981
4982                         hci_send_frame(hdev, skb);
4983                         hdev->acl_last_tx = jiffies;
4984
4985                         hdev->block_cnt -= blocks;
4986                         quote -= blocks;
4987
4988                         chan->sent += blocks;
4989                         chan->conn->sent += blocks;
4990                 }
4991         }
4992
4993         if (cnt != hdev->block_cnt)
4994                 hci_prio_recalculate(hdev, type);
4995 }
4996
4997 static void hci_sched_acl(struct hci_dev *hdev)
4998 {
4999         BT_DBG("%s", hdev->name);
5000
5001         /* No ACL links on a BR/EDR controller, nothing to schedule */
5002         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5003                 return;
5004
5005         /* No AMP links on an AMP controller, nothing to schedule */
5006         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5007                 return;
5008
5009         switch (hdev->flow_ctl_mode) {
5010         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5011                 hci_sched_acl_pkt(hdev);
5012                 break;
5013
5014         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5015                 hci_sched_acl_blk(hdev);
5016                 break;
5017         }
5018 }
5019
5020 /* Schedule SCO */
5021 static void hci_sched_sco(struct hci_dev *hdev)
5022 {
5023         struct hci_conn *conn;
5024         struct sk_buff *skb;
5025         int quote;
5026
5027         BT_DBG("%s", hdev->name);
5028
5029         if (!hci_conn_num(hdev, SCO_LINK))
5030                 return;
5031
5032         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5033                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5034                         BT_DBG("skb %p len %d", skb, skb->len);
5035                         hci_send_frame(hdev, skb);
5036
5037                         conn->sent++;
5038                         if (conn->sent == ~0)
5039                                 conn->sent = 0;
5040                 }
5041         }
5042 }
5043
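/* Schedule eSCO */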
5044 static void hci_sched_esco(struct hci_dev *hdev)
5045 {
5046         struct hci_conn *conn;
5047         struct sk_buff *skb;
5048         int quote;
5049
5050         BT_DBG("%s", hdev->name);
5051
5052         if (!hci_conn_num(hdev, ESCO_LINK))
5053                 return;
5054
5055         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5056                                                      &quote))) {
5057                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5058                         BT_DBG("skb %p len %d", skb, skb->len);
5059                         hci_send_frame(hdev, skb);
5060
5061                         conn->sent++;
5062                         if (conn->sent == ~0)
5063                                 conn->sent = 0;
5064                 }
5065         }
5066 }
5067
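/* Schedule LE: like the ACL packet scheduler, but drawing on the
 * dedicated LE buffer pool when the controller reports one
 * (hdev->le_pkts) and falling back to the shared ACL pool otherwise.
 */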
5068 static void hci_sched_le(struct hci_dev *hdev)
5069 {
5070         struct hci_chan *chan;
5071         struct sk_buff *skb;
5072         int quote, cnt, tmp;
5073
5074         BT_DBG("%s", hdev->name);
5075
5076         if (!hci_conn_num(hdev, LE_LINK))
5077                 return;
5078
5079         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5080                 /* LE tx timeout must be longer than maximum
5081                  * link supervision timeout (40.9 seconds) */
5082                 if (!hdev->le_cnt && hdev->le_pkts &&
5083                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5084                         hci_link_tx_to(hdev, LE_LINK);
5085         }
5086
5087         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5088         tmp = cnt;
5089         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5090                 u32 priority = (skb_peek(&chan->data_q))->priority;
5091                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5092                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5093                                skb->len, skb->priority);
5094
5095                         /* Stop if priority has changed */
5096                         if (skb->priority < priority)
5097                                 break;
5098
5099                         skb = skb_dequeue(&chan->data_q);
5100
5101                         hci_send_frame(hdev, skb);
5102                         hdev->le_last_tx = jiffies;
5103
5104                         cnt--;
5105                         chan->sent++;
5106                         chan->conn->sent++;
5107                 }
5108         }
5109
5110         if (hdev->le_pkts)
5111                 hdev->le_cnt = cnt;
5112         else
5113                 hdev->acl_cnt = cnt;
5114
5115         if (cnt != tmp)
5116                 hci_prio_recalculate(hdev, LE_LINK);
5117 }
5118
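/* TX work: run the per-link-type schedulers (skipped while a user
 * channel owns the device) and then flush the raw packet queue.
 */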
5119 static void hci_tx_work(struct work_struct *work)
5120 {
5121         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5122         struct sk_buff *skb;
5123
5124         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5125                hdev->sco_cnt, hdev->le_cnt);
5126
5127         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5128                 /* Schedule queues and send pending frames to the HCI driver */
5129                 hci_sched_acl(hdev);
5130                 hci_sched_sco(hdev);
5131                 hci_sched_esco(hdev);
5132                 hci_sched_le(hdev);
5133         }
5134
5135         /* Drain the queue of raw (unknown type) packets */
5136         while ((skb = skb_dequeue(&hdev->raw_q)))
5137                 hci_send_frame(hdev, skb);
5138 }
5139
5140 /* ----- HCI RX task (incoming data processing) ----- */
5141
5142 /* ACL data packet */
5143 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5144 {
5145         struct hci_acl_hdr *hdr = (void *) skb->data;
5146         struct hci_conn *conn;
5147         __u16 handle, flags;
5148
5149         skb_pull(skb, HCI_ACL_HDR_SIZE);
5150
5151         handle = __le16_to_cpu(hdr->handle);
5152         flags  = hci_flags(handle);
5153         handle = hci_handle(handle);
5154
5155         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5156                handle, flags);
5157
5158         hdev->stat.acl_rx++;
5159
5160         hci_dev_lock(hdev);
5161         conn = hci_conn_hash_lookup_handle(hdev, handle);
5162         hci_dev_unlock(hdev);
5163
5164         if (conn) {
5165                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5166
5167                 /* Send to upper protocol */
5168                 l2cap_recv_acldata(conn, skb, flags);
5169                 return;
5170         } else {
5171                 BT_ERR("%s ACL packet for unknown connection handle %d",
5172                        hdev->name, handle);
5173         }
5174
5175         kfree_skb(skb);
5176 }
5177
5178 /* SCO data packet */
5179 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5180 {
5181         struct hci_sco_hdr *hdr = (void *) skb->data;
5182         struct hci_conn *conn;
5183         __u16 handle;
5184
5185         skb_pull(skb, HCI_SCO_HDR_SIZE);
5186
5187         handle = __le16_to_cpu(hdr->handle);
5188
5189         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5190
5191         hdev->stat.sco_rx++;
5192
5193         hci_dev_lock(hdev);
5194         conn = hci_conn_hash_lookup_handle(hdev, handle);
5195         hci_dev_unlock(hdev);
5196
5197         if (conn) {
5198                 /* Send to upper protocol */
5199                 sco_recv_scodata(conn, skb);
5200                 return;
5201         } else {
5202                 BT_ERR("%s SCO packet for unknown connection handle %d",
5203                        hdev->name, handle);
5204         }
5205
5206         kfree_skb(skb);
5207 }
5208
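/* A request is complete once every command queued for it has been
 * sent, i.e. when the command queue is empty or the next queued
 * command starts a new request (bt_cb(skb)->req.start).
 */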
5209 static bool hci_req_is_complete(struct hci_dev *hdev)
5210 {
5211         struct sk_buff *skb;
5212
5213         skb = skb_peek(&hdev->cmd_q);
5214         if (!skb)
5215                 return true;
5216
5217         return bt_cb(skb)->req.start;
5218 }
5219
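/* Requeue a clone of the most recently sent command so the command
 * work will transmit it again. Used to recover from the spontaneous
 * resets some controllers generate during init; a reset itself is
 * never resent.
 */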
5220 static void hci_resend_last(struct hci_dev *hdev)
5221 {
5222         struct hci_command_hdr *sent;
5223         struct sk_buff *skb;
5224         u16 opcode;
5225
5226         if (!hdev->sent_cmd)
5227                 return;
5228
5229         sent = (void *) hdev->sent_cmd->data;
5230         opcode = __le16_to_cpu(sent->opcode);
5231         if (opcode == HCI_OP_RESET)
5232                 return;
5233
5234         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5235         if (!skb)
5236                 return;
5237
5238         skb_queue_head(&hdev->cmd_q, skb);
5239         queue_work(hdev->workqueue, &hdev->cmd_work);
5240 }
5241
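/* Called for command complete/status events to decide whether the
 * request the command belonged to has finished, invoking its
 * completion callback exactly once and, on failure, flushing the
 * request's remaining commands from the command queue.
 */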
5242 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5243 {
5244         hci_req_complete_t req_complete = NULL;
5245         struct sk_buff *skb;
5246         unsigned long flags;
5247
5248         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5249
5250         /* If the completed command doesn't match the last one that was
5251          * sent, we need to do special handling of it.
5252          */
5253         if (!hci_sent_cmd_data(hdev, opcode)) {
5254                 /* Some CSR based controllers generate a spontaneous
5255                  * reset complete event during init and any pending
5256                  * command will never be completed. In such a case we
5257                  * need to resend whatever was the last sent
5258                  * command.
5259                  */
5260                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5261                         hci_resend_last(hdev);
5262
5263                 return;
5264         }
5265
5266         /* If the command succeeded and there are still more commands in
5267          * this request, the request is not yet complete.
5268          */
5269         if (!status && !hci_req_is_complete(hdev))
5270                 return;
5271
5272         /* If this was the last command in a request, the complete
5273          * callback would be found in hdev->sent_cmd instead of the
5274          * command queue (hdev->cmd_q).
5275          */
5276         if (hdev->sent_cmd) {
5277                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5278
5279                 if (req_complete) {
5280                         /* We must set the complete callback to NULL to
5281                          * avoid calling the callback more than once if
5282                          * this function gets called again.
5283                          */
5284                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5285
5286                         goto call_complete;
5287                 }
5288         }
5289
5290         /* Remove all pending commands belonging to this request */
5291         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5292         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5293                 if (bt_cb(skb)->req.start) {
5294                         __skb_queue_head(&hdev->cmd_q, skb);
5295                         break;
5296                 }
5297
5298                 req_complete = bt_cb(skb)->req.complete;
5299                 kfree_skb(skb);
5300         }
5301         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5302
5303 call_complete:
5304         if (req_complete)
5305                 req_complete(hdev, status);
5306 }
5307
5308 static void hci_rx_work(struct work_struct *work)
5309 {
5310         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5311         struct sk_buff *skb;
5312
5313         BT_DBG("%s", hdev->name);
5314
5315         while ((skb = skb_dequeue(&hdev->rx_q))) {
5316                 /* Send copy to monitor */
5317                 hci_send_to_monitor(hdev, skb);
5318
5319                 if (atomic_read(&hdev->promisc)) {
5320                         /* Send copy to the sockets */
5321                         hci_send_to_sock(hdev, skb);
5322                 }
5323
5324                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5325                         kfree_skb(skb);
5326                         continue;
5327                 }
5328
5329                 if (test_bit(HCI_INIT, &hdev->flags)) {
5330                         /* Don't process data packets in these states. */
5331                         switch (bt_cb(skb)->pkt_type) {
5332                         case HCI_ACLDATA_PKT:
5333                         case HCI_SCODATA_PKT:
5334                                 kfree_skb(skb);
5335                                 continue;
5336                         }
5337                 }
5338
5339                 /* Process frame */
5340                 switch (bt_cb(skb)->pkt_type) {
5341                 case HCI_EVENT_PKT:
5342                         BT_DBG("%s Event packet", hdev->name);
5343                         hci_event_packet(hdev, skb);
5344                         break;
5345
5346                 case HCI_ACLDATA_PKT:
5347                         BT_DBG("%s ACL data packet", hdev->name);
5348                         hci_acldata_packet(hdev, skb);
5349                         break;
5350
5351                 case HCI_SCODATA_PKT:
5352                         BT_DBG("%s SCO data packet", hdev->name);
5353                         hci_scodata_packet(hdev, skb);
5354                         break;
5355
5356                 default:
5357                         kfree_skb(skb);
5358                         break;
5359                 }
5360         }
5361 }
5362
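/* Command work: transmit the next queued HCI command when the
 * controller has a command credit, keeping a clone in hdev->sent_cmd
 * for matching against the completion event and arming cmd_timer as a
 * watchdog (cancelled while a reset is in flight).
 */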
5363 static void hci_cmd_work(struct work_struct *work)
5364 {
5365         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5366         struct sk_buff *skb;
5367
5368         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5369                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5370
5371         /* Send queued commands */
5372         if (atomic_read(&hdev->cmd_cnt)) {
5373                 skb = skb_dequeue(&hdev->cmd_q);
5374                 if (!skb)
5375                         return;
5376
5377                 kfree_skb(hdev->sent_cmd);
5378
5379                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5380                 if (hdev->sent_cmd) {
5381                         atomic_dec(&hdev->cmd_cnt);
5382                         hci_send_frame(hdev, skb);
5383                         if (test_bit(HCI_RESET, &hdev->flags))
5384                                 cancel_delayed_work(&hdev->cmd_timer);
5385                         else
5386                                 schedule_delayed_work(&hdev->cmd_timer,
5387                                                       HCI_CMD_TIMEOUT);
5388                 } else {
5389                         skb_queue_head(&hdev->cmd_q, skb);
5390                         queue_work(hdev->workqueue, &hdev->cmd_work);
5391                 }
5392         }
5393 }
5394
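/* Append an LE Set Scan Enable command that turns scanning off. */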
5395 void hci_req_add_le_scan_disable(struct hci_request *req)
5396 {
5397         struct hci_cp_le_set_scan_enable cp;
5398
5399         memset(&cp, 0, sizeof(cp));
5400         cp.enable = LE_SCAN_DISABLE;
5401         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5402 }
5403
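/* Queue the commands for a passive background scan: pick a suitable
 * own address, program the scan parameters from the stored interval
 * and window, and enable scanning with duplicate filtering.
 */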
5404 void hci_req_add_le_passive_scan(struct hci_request *req)
5405 {
5406         struct hci_cp_le_set_scan_param param_cp;
5407         struct hci_cp_le_set_scan_enable enable_cp;
5408         struct hci_dev *hdev = req->hdev;
5409         u8 own_addr_type;
5410
5411         /* Set require_privacy to false since no SCAN_REQ are sent
5412          * during passive scanning. Not using an unresolvable address
5413          * here is important so that peer devices using direct
5414          * advertising with our address will be correctly reported
5415          * by the controller.
5416          */
5417         if (hci_update_random_address(req, false, &own_addr_type))
5418                 return;
5419
5420         memset(&param_cp, 0, sizeof(param_cp));
5421         param_cp.type = LE_SCAN_PASSIVE;
5422         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5423         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5424         param_cp.own_address_type = own_addr_type;
5425         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5426                     &param_cp);
5427
5428         memset(&enable_cp, 0, sizeof(enable_cp));
5429         enable_cp.enable = LE_SCAN_ENABLE;
5430         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5431         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5432                     &enable_cp);
5433 }
5434
5435 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5436 {
5437         if (status)
5438                 BT_DBG("HCI request failed to update background scanning: "
5439                        "status 0x%2.2x", status);
5440 }
5441
5442 /* This function controls the background scanning based on hdev->pend_le_conns
5443  * list. If there are pending LE connections we start background scanning,
5444  * otherwise we stop it.
5445  *
5446  * This function requires that the caller holds hdev->lock.
5447  */
5448 void hci_update_background_scan(struct hci_dev *hdev)
5449 {
5450         struct hci_request req;
5451         struct hci_conn *conn;
5452         int err;
5453
5454         if (!test_bit(HCI_UP, &hdev->flags) ||
5455             test_bit(HCI_INIT, &hdev->flags) ||
5456             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5457             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5458             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5459             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5460                 return;
5461
5462         /* No point in doing scanning if LE support hasn't been enabled */
5463         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5464                 return;
5465
5466         /* If discovery is active don't interfere with it */
5467         if (hdev->discovery.state != DISCOVERY_STOPPED)
5468                 return;
5469
5470         hci_req_init(&req, hdev);
5471
5472         if (list_empty(&hdev->pend_le_conns) &&
5473             list_empty(&hdev->pend_le_reports)) {
5474                 /* If there are no pending LE connections or devices
5475                  * to be scanned for, we should stop the background
5476                  * scanning.
5477                  */
5478
5479                 /* If controller is not scanning we are done. */
5480                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5481                         return;
5482
5483                 hci_req_add_le_scan_disable(&req);
5484
5485                 BT_DBG("%s stopping background scanning", hdev->name);
5486         } else {
5487                 /* If there is at least one pending LE connection, we should
5488                  * keep the background scan running.
5489                  */
5490
5491                 /* If controller is connecting, we should not start scanning
5492                  * since some controllers are not able to scan and connect at
5493                  * the same time.
5494                  */
5495                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5496                 if (conn)
5497                         return;
5498
5499                 /* If controller is currently scanning, we stop it to ensure we
5500          * don't miss any advertising (due to the duplicates filter).
5501                  */
5502                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5503                         hci_req_add_le_scan_disable(&req);
5504
5505                 hci_req_add_le_passive_scan(&req);
5506
5507                 BT_DBG("%s starting background scanning", hdev->name);
5508         }
5509
5510         err = hci_req_run(&req, update_background_scan_complete);
5511         if (err)
5512                 BT_ERR("Failed to run HCI request: err %d", err);
5513 }