net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

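/* Writing 'Y' here puts the controller into Device Under Test mode via
 * HCI_OP_ENABLE_DUT_MODE; writing 'N' leaves it again by resetting the
 * controller. The command is sent synchronously under the request lock,
 * and HCI_DUT_MODE is only toggled when the command status reports
 * success.
 */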
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

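/* Dump all locally supported LMP feature pages (and the LE feature page
 * when the controller is LE capable), one page per line.
 */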
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

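/* Print one line per cached inquiry entry: bdaddr, page scan repetition/
 * period/mode, class of device, clock offset, RSSI, SSP mode and the
 * entry timestamp.
 */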
static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

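/* The idle timeout is expressed in milliseconds. A value of 0 disables
 * idle mode handling; otherwise the valid range is 500 ms to 3600000 ms
 * (one hour).
 */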
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

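/* Sniff intervals are expressed in baseband slots (0.625 ms) and, per
 * the HCI specification, must be even. The setters additionally keep
 * the invariant sniff_min_interval <= sniff_max_interval.
 */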
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

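/* conn_info_min_age and conn_info_max_age bound (in milliseconds) how
 * long cached connection information such as RSSI and TX power is
 * considered valid before it is refreshed from the controller.
 */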
static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

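/* LE connection intervals are expressed in units of 1.25 ms. The HCI
 * specification allows 0x0006 (7.5 ms) through 0x0c80 (4 s), and the
 * setters also enforce le_conn_min_interval <= le_conn_max_interval.
 */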
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

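/* The slave latency is a plain event count; 0x01f3 (499) is the maximum
 * value the specification permits.
 */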
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

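/* The supervision timeout is expressed in units of 10 ms; the valid
 * range 0x000a-0x0c80 therefore corresponds to 100 ms through 32 s.
 */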
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

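/* The advertising channel map is a bit mask of the three LE advertising
 * channels: bit 0 = channel 37, bit 1 = channel 38, bit 2 = channel 39.
 * At least one channel must be enabled, hence the 0x01-0x07 range.
 */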
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

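/* Take ownership of the last received event (hdev->recv_evt) and check
 * that it is either the requested event or a Command Complete for the
 * given opcode. On success the skb is returned with its headers pulled;
 * otherwise it is freed and ERR_PTR(-ENODATA) is returned.
 */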
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

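/* Send a single HCI command and sleep (interruptibly, up to the given
 * timeout) until the controller answers with the expected event, or
 * with a Command Complete when no explicit event is requested. Must be
 * called from process context; callers are expected to hold the request
 * lock.
 */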
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

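/* AMP controllers use block-based rather than packet-based flow control
 * and need additional AMP-specific information (AMP info, data block
 * size, flow control mode and location data) during the first init
 * stage.
 */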
static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

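/* Pick the best supported inquiry mode: 0x02 for Inquiry Result with
 * Extended Inquiry Response, 0x01 for Inquiry Result with RSSI and 0x00
 * for the standard format. A few controllers, matched by manufacturer
 * and HCI/LMP revision below, handle RSSI inquiry correctly despite not
 * advertising the feature, so they are whitelisted explicitly.
 */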
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

1686 static int __hci_init(struct hci_dev *hdev)
1687 {
1688         int err;
1689
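             /* Stage 1 covers the commands that every controller type
              * supports: typically a reset plus reading basic controller
              * information such as features, version and buffer sizes.
              */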
1690         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691         if (err < 0)
1692                 return err;
1693
1694         /* The Device Under Test (DUT) mode is special and available for
1695          * all controller types. So just create it early on.
1696          */
1697         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699                                     &dut_mode_fops);
1700         }
1701
1702         /* The HCI_BREDR device type covers single-mode LE, single-mode
1703          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1704          * only need the first stage init.
1705          */
1706         if (hdev->dev_type != HCI_BREDR)
1707                 return 0;
1708
1709         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710         if (err < 0)
1711                 return err;
1712
1713         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714         if (err < 0)
1715                 return err;
1716
1717         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718         if (err < 0)
1719                 return err;
1720
1721         /* Only create debugfs entries during the initial setup
1722          * phase and not every time the controller gets powered on.
1723          */
1724         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725                 return 0;
1726
1727         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728                             &features_fops);
1729         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730                            &hdev->manufacturer);
1731         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1733         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734                             &blacklist_fops);
1735         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736                             &whitelist_fops);
1737         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1738
1739         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740                             &conn_info_min_age_fops);
1741         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742                             &conn_info_max_age_fops);
1743
1744         if (lmp_bredr_capable(hdev)) {
1745                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746                                     hdev, &inquiry_cache_fops);
1747                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748                                     hdev, &link_keys_fops);
1749                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750                                     hdev, &dev_class_fops);
1751                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752                                     hdev, &voice_setting_fops);
1753         }
1754
1755         if (lmp_ssp_capable(hdev)) {
1756                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757                                     hdev, &auto_accept_delay_fops);
1758                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759                                     hdev, &force_sc_support_fops);
1760                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761                                     hdev, &sc_only_mode_fops);
1762         }
1763
1764         if (lmp_sniff_capable(hdev)) {
1765                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766                                     hdev, &idle_timeout_fops);
1767                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768                                     hdev, &sniff_min_interval_fops);
1769                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770                                     hdev, &sniff_max_interval_fops);
1771         }
1772
1773         if (lmp_le_capable(hdev)) {
1774                 debugfs_create_file("identity", 0400, hdev->debugfs,
1775                                     hdev, &identity_fops);
1776                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777                                     hdev, &rpa_timeout_fops);
1778                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779                                     hdev, &random_address_fops);
1780                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781                                     hdev, &static_address_fops);
1782
1783                 /* For controllers with a public address, provide a debug
1784                  * option to force the usage of the configured static
1785                  * address. By default the public address is used.
1786                  */
1787                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788                         debugfs_create_file("force_static_address", 0644,
1789                                             hdev->debugfs, hdev,
1790                                             &force_static_address_fops);
1791
1792                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793                                   &hdev->le_white_list_size);
1794                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1795                                     &white_list_fops);
1796                 debugfs_create_file("identity_resolving_keys", 0400,
1797                                     hdev->debugfs, hdev,
1798                                     &identity_resolving_keys_fops);
1799                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800                                     hdev, &long_term_keys_fops);
1801                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802                                     hdev, &conn_min_interval_fops);
1803                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804                                     hdev, &conn_max_interval_fops);
1805                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806                                     hdev, &conn_latency_fops);
1807                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808                                     hdev, &supervision_timeout_fops);
1809                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810                                     hdev, &adv_channel_map_fops);
1811                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1812                                     &device_list_fops);
1813                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1814                                    hdev->debugfs,
1815                                    &hdev->discov_interleaved_timeout);
1816         }
1817
1818         return 0;
1819 }
1820
1821 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822 {
1823         struct hci_dev *hdev = req->hdev;
1824
1825         BT_DBG("%s %ld", hdev->name, opt);
1826
1827         /* Reset */
1828         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829                 hci_reset_req(req, 0);
1830
1831         /* Read Local Version */
1832         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834         /* Read BD Address */
1835         if (hdev->set_bdaddr)
1836                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837 }
1838
1839 static int __hci_unconf_init(struct hci_dev *hdev)
1840 {
1841         int err;
1842
1843         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844                 return 0;
1845
1846         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847         if (err < 0)
1848                 return err;
1849
1850         return 0;
1851 }
1852
1853 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1854 {
1855         __u8 scan = opt;
1856
1857         BT_DBG("%s %x", req->hdev->name, scan);
1858
1859         /* Inquiry and Page scans */
1860         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1861 }
1862
1863 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1864 {
1865         __u8 auth = opt;
1866
1867         BT_DBG("%s %x", req->hdev->name, auth);
1868
1869         /* Authentication */
1870         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1871 }
1872
1873 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1874 {
1875         __u8 encrypt = opt;
1876
1877         BT_DBG("%s %x", req->hdev->name, encrypt);
1878
1879         /* Encryption */
1880         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1881 }
1882
1883 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1884 {
1885         __le16 policy = cpu_to_le16(opt);
1886
1887         BT_DBG("%s %x", req->hdev->name, policy);
1888
1889         /* Default link policy */
1890         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1891 }
1892
1893 /* Get HCI device by index.
1894  * Device is held on return. */
1895 struct hci_dev *hci_dev_get(int index)
1896 {
1897         struct hci_dev *hdev = NULL, *d;
1898
1899         BT_DBG("%d", index);
1900
1901         if (index < 0)
1902                 return NULL;
1903
1904         read_lock(&hci_dev_list_lock);
1905         list_for_each_entry(d, &hci_dev_list, list) {
1906                 if (d->id == index) {
1907                         hdev = hci_dev_hold(d);
1908                         break;
1909                 }
1910         }
1911         read_unlock(&hci_dev_list_lock);
1912         return hdev;
1913 }
1914
1915 /* ---- Inquiry support ---- */
1916
1917 bool hci_discovery_active(struct hci_dev *hdev)
1918 {
1919         struct discovery_state *discov = &hdev->discovery;
1920
1921         switch (discov->state) {
1922         case DISCOVERY_FINDING:
1923         case DISCOVERY_RESOLVING:
1924                 return true;
1925
1926         default:
1927                 return false;
1928         }
1929 }
1930
1931 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1932 {
1933         int old_state = hdev->discovery.state;
1934
1935         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1936
1937         if (old_state == state)
1938                 return;
1939
1940         hdev->discovery.state = state;
1941
1942         switch (state) {
1943         case DISCOVERY_STOPPED:
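                     /* Discovery and passive background scanning share the
                      * LE scanner, so re-evaluate background scanning now
                      * that discovery no longer needs it.
                      */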
1944                 hci_update_background_scan(hdev);
1945
1946                 if (old_state != DISCOVERY_STARTING)
1947                         mgmt_discovering(hdev, 0);
1948                 break;
1949         case DISCOVERY_STARTING:
1950                 break;
1951         case DISCOVERY_FINDING:
1952                 mgmt_discovering(hdev, 1);
1953                 break;
1954         case DISCOVERY_RESOLVING:
1955                 break;
1956         case DISCOVERY_STOPPING:
1957                 break;
1958         }
1959 }
1960
1961 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1962 {
1963         struct discovery_state *cache = &hdev->discovery;
1964         struct inquiry_entry *p, *n;
1965
1966         list_for_each_entry_safe(p, n, &cache->all, all) {
1967                 list_del(&p->all);
1968                 kfree(p);
1969         }
1970
1971         INIT_LIST_HEAD(&cache->unknown);
1972         INIT_LIST_HEAD(&cache->resolve);
1973 }
1974
1975 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1976                                                bdaddr_t *bdaddr)
1977 {
1978         struct discovery_state *cache = &hdev->discovery;
1979         struct inquiry_entry *e;
1980
1981         BT_DBG("cache %p, %pMR", cache, bdaddr);
1982
1983         list_for_each_entry(e, &cache->all, all) {
1984                 if (!bacmp(&e->data.bdaddr, bdaddr))
1985                         return e;
1986         }
1987
1988         return NULL;
1989 }
1990
1991 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1992                                                        bdaddr_t *bdaddr)
1993 {
1994         struct discovery_state *cache = &hdev->discovery;
1995         struct inquiry_entry *e;
1996
1997         BT_DBG("cache %p, %pMR", cache, bdaddr);
1998
1999         list_for_each_entry(e, &cache->unknown, list) {
2000                 if (!bacmp(&e->data.bdaddr, bdaddr))
2001                         return e;
2002         }
2003
2004         return NULL;
2005 }
2006
2007 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2008                                                        bdaddr_t *bdaddr,
2009                                                        int state)
2010 {
2011         struct discovery_state *cache = &hdev->discovery;
2012         struct inquiry_entry *e;
2013
2014         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2015
2016         list_for_each_entry(e, &cache->resolve, list) {
2017                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2018                         return e;
2019                 if (!bacmp(&e->data.bdaddr, bdaddr))
2020                         return e;
2021         }
2022
2023         return NULL;
2024 }
2025
2026 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2027                                       struct inquiry_entry *ie)
2028 {
2029         struct discovery_state *cache = &hdev->discovery;
2030         struct list_head *pos = &cache->resolve;
2031         struct inquiry_entry *p;
2032
2033         list_del(&ie->list);
2034
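             /* Re-insert the entry so the resolve list stays ordered by
              * signal strength (smallest RSSI magnitude first), skipping
              * past entries whose name resolution is already in progress.
              */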
2035         list_for_each_entry(p, &cache->resolve, list) {
2036                 if (p->name_state != NAME_PENDING &&
2037                     abs(p->data.rssi) >= abs(ie->data.rssi))
2038                         break;
2039                 pos = &p->list;
2040         }
2041
2042         list_add(&ie->list, pos);
2043 }
2044
2045 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2046                              bool name_known)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *ie;
2050         u32 flags = 0;
2051
2052         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2053
2054         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2055
2056         if (!data->ssp_mode)
2057                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2058
2059         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2060         if (ie) {
2061                 if (!ie->data.ssp_mode)
2062                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2063
2064                 if (ie->name_state == NAME_NEEDED &&
2065                     data->rssi != ie->data.rssi) {
2066                         ie->data.rssi = data->rssi;
2067                         hci_inquiry_cache_update_resolve(hdev, ie);
2068                 }
2069
2070                 goto update;
2071         }
2072
2073         /* Entry not in the cache. Add new one. */
2074         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2075         if (!ie) {
2076                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077                 goto done;
2078         }
2079
2080         list_add(&ie->all, &cache->all);
2081
2082         if (name_known) {
2083                 ie->name_state = NAME_KNOWN;
2084         } else {
2085                 ie->name_state = NAME_NOT_KNOWN;
2086                 list_add(&ie->list, &cache->unknown);
2087         }
2088
2089 update:
2090         if (name_known && ie->name_state != NAME_KNOWN &&
2091             ie->name_state != NAME_PENDING) {
2092                 ie->name_state = NAME_KNOWN;
2093                 list_del(&ie->list);
2094         }
2095
2096         memcpy(&ie->data, data, sizeof(*data));
2097         ie->timestamp = jiffies;
2098         cache->timestamp = jiffies;
2099
2100         if (ie->name_state == NAME_NOT_KNOWN)
2101                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2102
2103 done:
2104         return flags;
2105 }
2106
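     /* Copy up to num entries from the inquiry cache into buf. The caller
      * must hold hdev->lock to keep the cache stable while dumping it.
      */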
2107 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2108 {
2109         struct discovery_state *cache = &hdev->discovery;
2110         struct inquiry_info *info = (struct inquiry_info *) buf;
2111         struct inquiry_entry *e;
2112         int copied = 0;
2113
2114         list_for_each_entry(e, &cache->all, all) {
2115                 struct inquiry_data *data = &e->data;
2116
2117                 if (copied >= num)
2118                         break;
2119
2120                 bacpy(&info->bdaddr, &data->bdaddr);
2121                 info->pscan_rep_mode    = data->pscan_rep_mode;
2122                 info->pscan_period_mode = data->pscan_period_mode;
2123                 info->pscan_mode        = data->pscan_mode;
2124                 memcpy(info->dev_class, data->dev_class, 3);
2125                 info->clock_offset      = data->clock_offset;
2126
2127                 info++;
2128                 copied++;
2129         }
2130
2131         BT_DBG("cache %p, copied %d", cache, copied);
2132         return copied;
2133 }
2134
2135 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2136 {
2137         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2138         struct hci_dev *hdev = req->hdev;
2139         struct hci_cp_inquiry cp;
2140
2141         BT_DBG("%s", hdev->name);
2142
2143         if (test_bit(HCI_INQUIRY, &hdev->flags))
2144                 return;
2145
2146         /* Start Inquiry */
2147         memcpy(&cp.lap, &ir->lap, 3);
2148         cp.length  = ir->length;
2149         cp.num_rsp = ir->num_rsp;
2150         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2151 }
2152
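     /* Action function for wait_on_bit(): sleep until woken up, and tell
      * wait_on_bit() to abort the wait if a signal is pending.
      */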
2153 static int wait_inquiry(void *word)
2154 {
2155         schedule();
2156         return signal_pending(current);
2157 }
2158
2159 int hci_inquiry(void __user *arg)
2160 {
2161         __u8 __user *ptr = arg;
2162         struct hci_inquiry_req ir;
2163         struct hci_dev *hdev;
2164         int err = 0, do_inquiry = 0, max_rsp;
2165         long timeo;
2166         __u8 *buf;
2167
2168         if (copy_from_user(&ir, ptr, sizeof(ir)))
2169                 return -EFAULT;
2170
2171         hdev = hci_dev_get(ir.dev_id);
2172         if (!hdev)
2173                 return -ENODEV;
2174
2175         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2176                 err = -EBUSY;
2177                 goto done;
2178         }
2179
2180         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2181                 err = -EOPNOTSUPP;
2182                 goto done;
2183         }
2184
2185         if (hdev->dev_type != HCI_BREDR) {
2186                 err = -EOPNOTSUPP;
2187                 goto done;
2188         }
2189
2190         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2191                 err = -EOPNOTSUPP;
2192                 goto done;
2193         }
2194
2195         hci_dev_lock(hdev);
2196         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2197             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2198                 hci_inquiry_cache_flush(hdev);
2199                 do_inquiry = 1;
2200         }
2201         hci_dev_unlock(hdev);
2202
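             /* The inquiry length is measured in units of 1.28 seconds;
              * two seconds per unit leaves some headroom for the request
              * to complete.
              */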
2203         timeo = ir.length * msecs_to_jiffies(2000);
2204
2205         if (do_inquiry) {
2206                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2207                                    timeo);
2208                 if (err < 0)
2209                         goto done;
2210
2211                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212                  * cleared). If it is interrupted by a signal, return -EINTR.
2213                  */
2214                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215                                 TASK_INTERRUPTIBLE))
2216                         return -EINTR;
2217         }
2218
2219         /* For an unlimited number of responses, use a buffer with
2220          * room for 255 entries.
2221          */
2222         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2223
2224         /* inquiry_cache_dump() can't sleep. Therefore allocate a temporary
2225          * buffer and then copy it to user space.
2226          */
2227         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2228         if (!buf) {
2229                 err = -ENOMEM;
2230                 goto done;
2231         }
2232
2233         hci_dev_lock(hdev);
2234         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2235         hci_dev_unlock(hdev);
2236
2237         BT_DBG("num_rsp %d", ir.num_rsp);
2238
2239         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2240                 ptr += sizeof(ir);
2241                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2242                                  ir.num_rsp))
2243                         err = -EFAULT;
2244         } else
2245                 err = -EFAULT;
2246
2247         kfree(buf);
2248
2249 done:
2250         hci_dev_put(hdev);
2251         return err;
2252 }
2253
2254 static int hci_dev_do_open(struct hci_dev *hdev)
2255 {
2256         int ret = 0;
2257
2258         BT_DBG("%s %p", hdev->name, hdev);
2259
2260         hci_req_lock(hdev);
2261
2262         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2263                 ret = -ENODEV;
2264                 goto done;
2265         }
2266
2267         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2269                 /* Check for rfkill but allow the HCI setup stage to
2270                  * proceed (which in itself doesn't cause any RF activity).
2271                  */
2272                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2273                         ret = -ERFKILL;
2274                         goto done;
2275                 }
2276
2277                 /* Check for valid public address or a configured static
2278                  * random address, but let the HCI setup proceed to
2279                  * be able to determine if there is a public address
2280                  * or not.
2281                  *
2282                  * In case of user channel usage, it is not important
2283                  * if a public address or static random address is
2284                  * available.
2285                  *
2286                  * This check is only valid for BR/EDR controllers
2287                  * since AMP controllers do not have an address.
2288                  */
2289                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290                     hdev->dev_type == HCI_BREDR &&
2291                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293                         ret = -EADDRNOTAVAIL;
2294                         goto done;
2295                 }
2296         }
2297
2298         if (test_bit(HCI_UP, &hdev->flags)) {
2299                 ret = -EALREADY;
2300                 goto done;
2301         }
2302
2303         if (hdev->open(hdev)) {
2304                 ret = -EIO;
2305                 goto done;
2306         }
2307
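             /* Allow exactly one outstanding HCI command during init; the
              * credit counter is refilled by Command Complete and Command
              * Status events.
              */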
2308         atomic_set(&hdev->cmd_cnt, 1);
2309         set_bit(HCI_INIT, &hdev->flags);
2310
2311         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2312                 if (hdev->setup)
2313                         ret = hdev->setup(hdev);
2314
2315                 /* The transport driver can set these quirks before
2316                  * creating the HCI device or in its setup callback.
2317                  *
2318                  * In case any of them is set, the controller has to
2319                  * start up as unconfigured.
2320                  */
2321                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2323                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2324
2325                 /* For an unconfigured controller it is required to
2326                  * read at least the version information provided by
2327                  * the Read Local Version Information command.
2328                  *
2329                  * If the set_bdaddr driver callback is provided, then
2330                  * also the original Bluetooth public device address
2331                  * will be read using the Read BD Address command.
2332                  */
2333                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334                         ret = __hci_unconf_init(hdev);
2335         }
2336
2337         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338                 /* If public address change is configured, ensure that
2339                  * the address gets programmed. If the driver does not
2340                  * support changing the public address, fail the power
2341                  * on procedure.
2342                  */
2343                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344                     hdev->set_bdaddr)
2345                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346                 else
2347                         ret = -EADDRNOTAVAIL;
2348         }
2349
2350         if (!ret) {
2351                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2352                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2353                         ret = __hci_init(hdev);
2354         }
2355
2356         clear_bit(HCI_INIT, &hdev->flags);
2357
2358         if (!ret) {
2359                 hci_dev_hold(hdev);
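                     /* Force generation of a fresh resolvable private
                      * address the next time one is needed.
                      */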
2360                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2361                 set_bit(HCI_UP, &hdev->flags);
2362                 hci_notify(hdev, HCI_DEV_UP);
2363                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2364                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2365                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2366                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2367                     hdev->dev_type == HCI_BREDR) {
2368                         hci_dev_lock(hdev);
2369                         mgmt_powered(hdev, 1);
2370                         hci_dev_unlock(hdev);
2371                 }
2372         } else {
2373                 /* Init failed, cleanup */
2374                 flush_work(&hdev->tx_work);
2375                 flush_work(&hdev->cmd_work);
2376                 flush_work(&hdev->rx_work);
2377
2378                 skb_queue_purge(&hdev->cmd_q);
2379                 skb_queue_purge(&hdev->rx_q);
2380
2381                 if (hdev->flush)
2382                         hdev->flush(hdev);
2383
2384                 if (hdev->sent_cmd) {
2385                         kfree_skb(hdev->sent_cmd);
2386                         hdev->sent_cmd = NULL;
2387                 }
2388
2389                 hdev->close(hdev);
2390                 hdev->flags &= BIT(HCI_RAW);
2391         }
2392
2393 done:
2394         hci_req_unlock(hdev);
2395         return ret;
2396 }
2397
2398 /* ---- HCI ioctl helpers ---- */
2399
2400 int hci_dev_open(__u16 dev)
2401 {
2402         struct hci_dev *hdev;
2403         int err;
2404
2405         hdev = hci_dev_get(dev);
2406         if (!hdev)
2407                 return -ENODEV;
2408
2409         /* Devices that are marked as unconfigured can only be powered
2410          * up as user channel. Trying to bring them up as normal devices
2411          * will result in a failure. Only user channel operation is
2412          * possible.
2413          *
2414          * When this function is called for a user channel, the flag
2415          * HCI_USER_CHANNEL will be set first before attempting to
2416          * open the device.
2417          */
2418         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2419             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420                 err = -EOPNOTSUPP;
2421                 goto done;
2422         }
2423
2424         /* We need to ensure that no other power on/off work is pending
2425          * before proceeding to call hci_dev_do_open. This is
2426          * particularly important if the setup procedure has not yet
2427          * completed.
2428          */
2429         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430                 cancel_delayed_work(&hdev->power_off);
2431
2432         /* After this call it is guaranteed that the setup procedure
2433          * has finished. This means that error conditions like RFKILL
2434          * or no valid public or static random address apply.
2435          */
2436         flush_workqueue(hdev->req_workqueue);
2437
2438         err = hci_dev_do_open(hdev);
2439
2440 done:
2441         hci_dev_put(hdev);
2442         return err;
2443 }
2444
2445 /* This function requires the caller holds hdev->lock */
2446 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447 {
2448         struct hci_conn_params *p;
2449
2450         list_for_each_entry(p, &hdev->le_conn_params, list)
2451                 list_del_init(&p->action);
2452
2453         BT_DBG("All LE pending actions cleared");
2454 }
2455
2456 static int hci_dev_do_close(struct hci_dev *hdev)
2457 {
2458         BT_DBG("%s %p", hdev->name, hdev);
2459
2460         cancel_delayed_work(&hdev->power_off);
2461
2462         hci_req_cancel(hdev, ENODEV);
2463         hci_req_lock(hdev);
2464
2465         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2466                 cancel_delayed_work_sync(&hdev->cmd_timer);
2467                 hci_req_unlock(hdev);
2468                 return 0;
2469         }
2470
2471         /* Flush RX and TX works */
2472         flush_work(&hdev->tx_work);
2473         flush_work(&hdev->rx_work);
2474
2475         if (hdev->discov_timeout > 0) {
2476                 cancel_delayed_work(&hdev->discov_off);
2477                 hdev->discov_timeout = 0;
2478                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2479                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2480         }
2481
2482         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2483                 cancel_delayed_work(&hdev->service_cache);
2484
2485         cancel_delayed_work_sync(&hdev->le_scan_disable);
2486
2487         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488                 cancel_delayed_work_sync(&hdev->rpa_expired);
2489
2490         hci_dev_lock(hdev);
2491         hci_inquiry_cache_flush(hdev);
2492         hci_conn_hash_flush(hdev);
2493         hci_pend_le_actions_clear(hdev);
2494         hci_dev_unlock(hdev);
2495
2496         hci_notify(hdev, HCI_DEV_DOWN);
2497
2498         if (hdev->flush)
2499                 hdev->flush(hdev);
2500
2501         /* Reset device */
2502         skb_queue_purge(&hdev->cmd_q);
2503         atomic_set(&hdev->cmd_cnt, 1);
2504         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2506             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2507                 set_bit(HCI_INIT, &hdev->flags);
2508                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2509                 clear_bit(HCI_INIT, &hdev->flags);
2510         }
2511
2512         /* Flush cmd work */
2513         flush_work(&hdev->cmd_work);
2514
2515         /* Drop queues */
2516         skb_queue_purge(&hdev->rx_q);
2517         skb_queue_purge(&hdev->cmd_q);
2518         skb_queue_purge(&hdev->raw_q);
2519
2520         /* Drop last sent command */
2521         if (hdev->sent_cmd) {
2522                 cancel_delayed_work_sync(&hdev->cmd_timer);
2523                 kfree_skb(hdev->sent_cmd);
2524                 hdev->sent_cmd = NULL;
2525         }
2526
2527         kfree_skb(hdev->recv_evt);
2528         hdev->recv_evt = NULL;
2529
2530         /* After this point our queues are empty
2531          * and no tasks are scheduled. */
2532         hdev->close(hdev);
2533
2534         /* Clear flags */
2535         hdev->flags &= BIT(HCI_RAW);
2536         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2537
2538         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539                 if (hdev->dev_type == HCI_BREDR) {
2540                         hci_dev_lock(hdev);
2541                         mgmt_powered(hdev, 0);
2542                         hci_dev_unlock(hdev);
2543                 }
2544         }
2545
2546         /* Controller radio is available but is currently powered down */
2547         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2548
2549         memset(hdev->eir, 0, sizeof(hdev->eir));
2550         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2551         bacpy(&hdev->random_addr, BDADDR_ANY);
2552
2553         hci_req_unlock(hdev);
2554
2555         hci_dev_put(hdev);
2556         return 0;
2557 }
2558
2559 int hci_dev_close(__u16 dev)
2560 {
2561         struct hci_dev *hdev;
2562         int err;
2563
2564         hdev = hci_dev_get(dev);
2565         if (!hdev)
2566                 return -ENODEV;
2567
2568         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569                 err = -EBUSY;
2570                 goto done;
2571         }
2572
2573         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574                 cancel_delayed_work(&hdev->power_off);
2575
2576         err = hci_dev_do_close(hdev);
2577
2578 done:
2579         hci_dev_put(hdev);
2580         return err;
2581 }
2582
2583 int hci_dev_reset(__u16 dev)
2584 {
2585         struct hci_dev *hdev;
2586         int ret = 0;
2587
2588         hdev = hci_dev_get(dev);
2589         if (!hdev)
2590                 return -ENODEV;
2591
2592         hci_req_lock(hdev);
2593
2594         if (!test_bit(HCI_UP, &hdev->flags)) {
2595                 ret = -ENETDOWN;
2596                 goto done;
2597         }
2598
2599         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600                 ret = -EBUSY;
2601                 goto done;
2602         }
2603
2604         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2605                 ret = -EOPNOTSUPP;
2606                 goto done;
2607         }
2608
2609         /* Drop queues */
2610         skb_queue_purge(&hdev->rx_q);
2611         skb_queue_purge(&hdev->cmd_q);
2612
2613         hci_dev_lock(hdev);
2614         hci_inquiry_cache_flush(hdev);
2615         hci_conn_hash_flush(hdev);
2616         hci_dev_unlock(hdev);
2617
2618         if (hdev->flush)
2619                 hdev->flush(hdev);
2620
2621         atomic_set(&hdev->cmd_cnt, 1);
2622         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2623
2624         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2625
2626 done:
2627         hci_req_unlock(hdev);
2628         hci_dev_put(hdev);
2629         return ret;
2630 }
2631
2632 int hci_dev_reset_stat(__u16 dev)
2633 {
2634         struct hci_dev *hdev;
2635         int ret = 0;
2636
2637         hdev = hci_dev_get(dev);
2638         if (!hdev)
2639                 return -ENODEV;
2640
2641         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2642                 ret = -EBUSY;
2643                 goto done;
2644         }
2645
2646         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2647                 ret = -EOPNOTSUPP;
2648                 goto done;
2649         }
2650
2651         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2652
2653 done:
2654         hci_dev_put(hdev);
2655         return ret;
2656 }
2657
2658 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2659 {
2660         struct hci_dev *hdev;
2661         struct hci_dev_req dr;
2662         int err = 0;
2663
2664         if (copy_from_user(&dr, arg, sizeof(dr)))
2665                 return -EFAULT;
2666
2667         hdev = hci_dev_get(dr.dev_id);
2668         if (!hdev)
2669                 return -ENODEV;
2670
2671         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2672                 err = -EBUSY;
2673                 goto done;
2674         }
2675
2676         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2677                 err = -EOPNOTSUPP;
2678                 goto done;
2679         }
2680
2681         if (hdev->dev_type != HCI_BREDR) {
2682                 err = -EOPNOTSUPP;
2683                 goto done;
2684         }
2685
2686         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2687                 err = -EOPNOTSUPP;
2688                 goto done;
2689         }
2690
2691         switch (cmd) {
2692         case HCISETAUTH:
2693                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2694                                    HCI_INIT_TIMEOUT);
2695                 break;
2696
2697         case HCISETENCRYPT:
2698                 if (!lmp_encrypt_capable(hdev)) {
2699                         err = -EOPNOTSUPP;
2700                         break;
2701                 }
2702
2703                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2704                         /* Auth must be enabled first */
2705                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2706                                            HCI_INIT_TIMEOUT);
2707                         if (err)
2708                                 break;
2709                 }
2710
2711                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2712                                    HCI_INIT_TIMEOUT);
2713                 break;
2714
2715         case HCISETSCAN:
2716                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2717                                    HCI_INIT_TIMEOUT);
2718
2719                 /* Ensure that the connectable state gets correctly
2720                  * notified if the whitelist is in use.
2721                  */
2722                 if (!err && !list_empty(&hdev->whitelist)) {
2723                         bool changed;
2724
2725                         if ((dr.dev_opt & SCAN_PAGE))
2726                                 changed = !test_and_set_bit(HCI_CONNECTABLE,
2727                                                             &hdev->dev_flags);
2728                         else
2729                                 changed = test_and_clear_bit(HCI_CONNECTABLE,
2730                                                              &hdev->dev_flags);
2731
2732                         if (changed)
2733                                 mgmt_new_settings(hdev);
2734                 }
2735                 break;
2736
2737         case HCISETLINKPOL:
2738                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2739                                    HCI_INIT_TIMEOUT);
2740                 break;
2741
2742         case HCISETLINKMODE:
2743                 hdev->link_mode = ((__u16) dr.dev_opt) &
2744                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2745                 break;
2746
2747         case HCISETPTYPE:
2748                 hdev->pkt_type = (__u16) dr.dev_opt;
2749                 break;
2750
2751         case HCISETACLMTU:
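                     /* dev_opt packs two 16-bit values: the packet count in
                      * the first half-word and the MTU in the second.
                      */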
2752                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2753                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2754                 break;
2755
2756         case HCISETSCOMTU:
2757                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2758                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2759                 break;
2760
2761         default:
2762                 err = -EINVAL;
2763                 break;
2764         }
2765
2766 done:
2767         hci_dev_put(hdev);
2768         return err;
2769 }
2770
2771 int hci_get_dev_list(void __user *arg)
2772 {
2773         struct hci_dev *hdev;
2774         struct hci_dev_list_req *dl;
2775         struct hci_dev_req *dr;
2776         int n = 0, size, err;
2777         __u16 dev_num;
2778
2779         if (get_user(dev_num, (__u16 __user *) arg))
2780                 return -EFAULT;
2781
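             /* Reject zero and cap the count so that the temporary list
              * allocation below stays within two pages.
              */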
2782         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2783                 return -EINVAL;
2784
2785         size = sizeof(*dl) + dev_num * sizeof(*dr);
2786
2787         dl = kzalloc(size, GFP_KERNEL);
2788         if (!dl)
2789                 return -ENOMEM;
2790
2791         dr = dl->dev_req;
2792
2793         read_lock(&hci_dev_list_lock);
2794         list_for_each_entry(hdev, &hci_dev_list, list) {
2795                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2796                         cancel_delayed_work(&hdev->power_off);
2797
2798                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2799                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2800
2801                 (dr + n)->dev_id  = hdev->id;
2802                 (dr + n)->dev_opt = hdev->flags;
2803
2804                 if (++n >= dev_num)
2805                         break;
2806         }
2807         read_unlock(&hci_dev_list_lock);
2808
2809         dl->dev_num = n;
2810         size = sizeof(*dl) + n * sizeof(*dr);
2811
2812         err = copy_to_user(arg, dl, size);
2813         kfree(dl);
2814
2815         return err ? -EFAULT : 0;
2816 }
2817
2818 int hci_get_dev_info(void __user *arg)
2819 {
2820         struct hci_dev *hdev;
2821         struct hci_dev_info di;
2822         int err = 0;
2823
2824         if (copy_from_user(&di, arg, sizeof(di)))
2825                 return -EFAULT;
2826
2827         hdev = hci_dev_get(di.dev_id);
2828         if (!hdev)
2829                 return -ENODEV;
2830
2831         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2832                 cancel_delayed_work_sync(&hdev->power_off);
2833
2834         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2835                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2836
2837         strcpy(di.name, hdev->name);
2838         di.bdaddr   = hdev->bdaddr;
2839         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2840         di.flags    = hdev->flags;
2841         di.pkt_type = hdev->pkt_type;
2842         if (lmp_bredr_capable(hdev)) {
2843                 di.acl_mtu  = hdev->acl_mtu;
2844                 di.acl_pkts = hdev->acl_pkts;
2845                 di.sco_mtu  = hdev->sco_mtu;
2846                 di.sco_pkts = hdev->sco_pkts;
2847         } else {
2848                 di.acl_mtu  = hdev->le_mtu;
2849                 di.acl_pkts = hdev->le_pkts;
2850                 di.sco_mtu  = 0;
2851                 di.sco_pkts = 0;
2852         }
2853         di.link_policy = hdev->link_policy;
2854         di.link_mode   = hdev->link_mode;
2855
2856         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2857         memcpy(&di.features, &hdev->features, sizeof(di.features));
2858
2859         if (copy_to_user(arg, &di, sizeof(di)))
2860                 err = -EFAULT;
2861
2862         hci_dev_put(hdev);
2863
2864         return err;
2865 }
2866
2867 /* ---- Interface to HCI drivers ---- */
2868
2869 static int hci_rfkill_set_block(void *data, bool blocked)
2870 {
2871         struct hci_dev *hdev = data;
2872
2873         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2874
2875         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2876                 return -EBUSY;
2877
2878         if (blocked) {
2879                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2880                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2881                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2882                         hci_dev_do_close(hdev);
2883         } else {
2884                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2885         }
2886
2887         return 0;
2888 }
2889
2890 static const struct rfkill_ops hci_rfkill_ops = {
2891         .set_block = hci_rfkill_set_block,
2892 };
2893
2894 static void hci_power_on(struct work_struct *work)
2895 {
2896         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2897         int err;
2898
2899         BT_DBG("%s", hdev->name);
2900
2901         err = hci_dev_do_open(hdev);
2902         if (err < 0) {
2903                 mgmt_set_powered_failed(hdev, err);
2904                 return;
2905         }
2906
2907         /* During the HCI setup phase, a few error conditions are
2908          * ignored and they need to be checked now. If they are still
2909          * valid, it is important to turn the device back off.
2910          */
2911         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2912             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2913             (hdev->dev_type == HCI_BREDR &&
2914              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2915              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2916                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2917                 hci_dev_do_close(hdev);
2918         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2919                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2920                                    HCI_AUTO_OFF_TIMEOUT);
2921         }
2922
2923         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2924                 /* For unconfigured devices, set the HCI_RAW flag
2925                  * so that userspace can easily identify them.
2926                  */
2927                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2928                         set_bit(HCI_RAW, &hdev->flags);
2929
2930                 /* For fully configured devices, this will send
2931                  * the Index Added event. For unconfigured devices,
2932                  * it will send the Unconfigured Index Added event.
2933                  *
2934                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2935                  * and no event will be sent.
2936                  */
2937                 mgmt_index_added(hdev);
2938         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2939                 /* When the controller is now configured, then it
2940                  * is important to clear the HCI_RAW flag.
2941                  */
2942                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2943                         clear_bit(HCI_RAW, &hdev->flags);
2944
2945                 /* Powering on the controller with HCI_CONFIG set only
2946                  * happens with the transition from unconfigured to
2947                  * configured. This will send the Index Added event.
2948                  */
2949                 mgmt_index_added(hdev);
2950         }
2951 }
2952
2953 static void hci_power_off(struct work_struct *work)
2954 {
2955         struct hci_dev *hdev = container_of(work, struct hci_dev,
2956                                             power_off.work);
2957
2958         BT_DBG("%s", hdev->name);
2959
2960         hci_dev_do_close(hdev);
2961 }
2962
2963 static void hci_discov_off(struct work_struct *work)
2964 {
2965         struct hci_dev *hdev;
2966
2967         hdev = container_of(work, struct hci_dev, discov_off.work);
2968
2969         BT_DBG("%s", hdev->name);
2970
2971         mgmt_discoverable_timeout(hdev);
2972 }
2973
2974 void hci_uuids_clear(struct hci_dev *hdev)
2975 {
2976         struct bt_uuid *uuid, *tmp;
2977
2978         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2979                 list_del(&uuid->list);
2980                 kfree(uuid);
2981         }
2982 }
2983
2984 void hci_link_keys_clear(struct hci_dev *hdev)
2985 {
2986         struct list_head *p, *n;
2987
2988         list_for_each_safe(p, n, &hdev->link_keys) {
2989                 struct link_key *key;
2990
2991                 key = list_entry(p, struct link_key, list);
2992
2993                 list_del(p);
2994                 kfree(key);
2995         }
2996 }
2997
2998 void hci_smp_ltks_clear(struct hci_dev *hdev)
2999 {
3000         struct smp_ltk *k, *tmp;
3001
3002         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3003                 list_del(&k->list);
3004                 kfree(k);
3005         }
3006 }
3007
3008 void hci_smp_irks_clear(struct hci_dev *hdev)
3009 {
3010         struct smp_irk *k, *tmp;
3011
3012         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3013                 list_del(&k->list);
3014                 kfree(k);
3015         }
3016 }
3017
3018 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3019 {
3020         struct link_key *k;
3021
3022         list_for_each_entry(k, &hdev->link_keys, list)
3023                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3024                         return k;
3025
3026         return NULL;
3027 }
3028
3029 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3030                                u8 key_type, u8 old_key_type)
3031 {
3032         /* Legacy key */
3033         if (key_type < 0x03)
3034                 return true;
3035
3036         /* Debug keys are insecure so don't store them persistently */
3037         if (key_type == HCI_LK_DEBUG_COMBINATION)
3038                 return false;
3039
3040         /* Changed combination key and there's no previous one */
3041         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3042                 return false;
3043
3044         /* Security mode 3 case */
3045         if (!conn)
3046                 return true;
3047
3048         /* Both the local and the remote side requested some form of bonding */
3049         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3050                 return true;
3051
3052         /* Local side had dedicated bonding as requirement */
3053         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3054                 return true;
3055
3056         /* Remote side had dedicated bonding as requirement */
3057         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3058                 return true;
3059
3060         /* If none of the above criteria match, then don't store the key
3061          * persistently */
3062         return false;
3063 }
3064
3065 static bool ltk_type_master(u8 type)
3066 {
3067         return (type == SMP_LTK);
3068 }
3069
3070 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3071                              bool master)
3072 {
3073         struct smp_ltk *k;
3074
3075         list_for_each_entry(k, &hdev->long_term_keys, list) {
3076                 if (k->ediv != ediv || k->rand != rand)
3077                         continue;
3078
3079                 if (ltk_type_master(k->type) != master)
3080                         continue;
3081
3082                 return k;
3083         }
3084
3085         return NULL;
3086 }
3087
3088 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3089                                      u8 addr_type, bool master)
3090 {
3091         struct smp_ltk *k;
3092
3093         list_for_each_entry(k, &hdev->long_term_keys, list)
3094                 if (addr_type == k->bdaddr_type &&
3095                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3096                     ltk_type_master(k->type) == master)
3097                         return k;
3098
3099         return NULL;
3100 }
3101
3102 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3103 {
3104         struct smp_irk *irk;
3105
3106         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3107                 if (!bacmp(&irk->rpa, rpa))
3108                         return irk;
3109         }
3110
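             /* No cached RPA matched. Try to resolve the RPA against each
              * stored IRK and cache the result on success.
              */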
3111         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3112                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3113                         bacpy(&irk->rpa, rpa);
3114                         return irk;
3115                 }
3116         }
3117
3118         return NULL;
3119 }
3120
3121 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3122                                      u8 addr_type)
3123 {
3124         struct smp_irk *irk;
3125
3126         /* Identity Address must be public or static random */
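             /* A static random address is identified by its two most
              * significant bits being set to 1.
              */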
3127         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3128                 return NULL;
3129
3130         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3131                 if (addr_type == irk->addr_type &&
3132                     bacmp(bdaddr, &irk->bdaddr) == 0)
3133                         return irk;
3134         }
3135
3136         return NULL;
3137 }
3138
3139 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3140                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3141                                   u8 pin_len, bool *persistent)
3142 {
3143         struct link_key *key, *old_key;
3144         u8 old_key_type;
3145
3146         old_key = hci_find_link_key(hdev, bdaddr);
3147         if (old_key) {
3148                 old_key_type = old_key->type;
3149                 key = old_key;
3150         } else {
3151                 old_key_type = conn ? conn->key_type : 0xff;
3152                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3153                 if (!key)
3154                         return NULL;
3155                 list_add(&key->list, &hdev->link_keys);
3156         }
3157
3158         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3159
3160         /* Some buggy controller combinations generate a changed
3161          * combination key for legacy pairing even when there's no
3162          * previous key */
3163         if (type == HCI_LK_CHANGED_COMBINATION &&
3164             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3165                 type = HCI_LK_COMBINATION;
3166                 if (conn)
3167                         conn->key_type = type;
3168         }
3169
3170         bacpy(&key->bdaddr, bdaddr);
3171         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3172         key->pin_len = pin_len;
3173
3174         if (type == HCI_LK_CHANGED_COMBINATION)
3175                 key->type = old_key_type;
3176         else
3177                 key->type = type;
3178
3179         if (persistent)
3180                 *persistent = hci_persistent_key(hdev, conn, type,
3181                                                  old_key_type);
3182
3183         return key;
3184 }
3185
3186 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3187                             u8 addr_type, u8 type, u8 authenticated,
3188                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3189 {
3190         struct smp_ltk *key, *old_key;
3191         bool master = ltk_type_master(type);
3192
3193         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3194         if (old_key)
3195                 key = old_key;
3196         else {
3197                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3198                 if (!key)
3199                         return NULL;
3200                 list_add(&key->list, &hdev->long_term_keys);
3201         }
3202
3203         bacpy(&key->bdaddr, bdaddr);
3204         key->bdaddr_type = addr_type;
3205         memcpy(key->val, tk, sizeof(key->val));
3206         key->authenticated = authenticated;
3207         key->ediv = ediv;
3208         key->rand = rand;
3209         key->enc_size = enc_size;
3210         key->type = type;
3211
3212         return key;
3213 }
3214
3215 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3216                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3217 {
3218         struct smp_irk *irk;
3219
3220         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3221         if (!irk) {
3222                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3223                 if (!irk)
3224                         return NULL;
3225
3226                 bacpy(&irk->bdaddr, bdaddr);
3227                 irk->addr_type = addr_type;
3228
3229                 list_add(&irk->list, &hdev->identity_resolving_keys);
3230         }
3231
3232         memcpy(irk->val, val, 16);
3233         bacpy(&irk->rpa, rpa);
3234
3235         return irk;
3236 }
3237
3238 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3239 {
3240         struct link_key *key;
3241
3242         key = hci_find_link_key(hdev, bdaddr);
3243         if (!key)
3244                 return -ENOENT;
3245
3246         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3247
3248         list_del(&key->list);
3249         kfree(key);
3250
3251         return 0;
3252 }
3253
3254 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3255 {
3256         struct smp_ltk *k, *tmp;
3257         int removed = 0;
3258
3259         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3260                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3261                         continue;
3262
3263                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3264
3265                 list_del(&k->list);
3266                 kfree(k);
3267                 removed++;
3268         }
3269
3270         return removed ? 0 : -ENOENT;
3271 }
3272
3273 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3274 {
3275         struct smp_irk *k, *tmp;
3276
3277         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3278                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3279                         continue;
3280
3281                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3282
3283                 list_del(&k->list);
3284                 kfree(k);
3285         }
3286 }
3287
3288 /* HCI command timer function */
3289 static void hci_cmd_timeout(struct work_struct *work)
3290 {
3291         struct hci_dev *hdev = container_of(work, struct hci_dev,
3292                                             cmd_timer.work);
3293
3294         if (hdev->sent_cmd) {
3295                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3296                 u16 opcode = __le16_to_cpu(sent->opcode);
3297
3298                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3299         } else {
3300                 BT_ERR("%s command tx timeout", hdev->name);
3301         }
3302
3303         atomic_set(&hdev->cmd_cnt, 1);
3304         queue_work(hdev->workqueue, &hdev->cmd_work);
3305 }
3306
3307 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3308                                           bdaddr_t *bdaddr)
3309 {
3310         struct oob_data *data;
3311
3312         list_for_each_entry(data, &hdev->remote_oob_data, list)
3313                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3314                         return data;
3315
3316         return NULL;
3317 }
3318
3319 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3320 {
3321         struct oob_data *data;
3322
3323         data = hci_find_remote_oob_data(hdev, bdaddr);
3324         if (!data)
3325                 return -ENOENT;
3326
3327         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3328
3329         list_del(&data->list);
3330         kfree(data);
3331
3332         return 0;
3333 }
3334
3335 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3336 {
3337         struct oob_data *data, *n;
3338
3339         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3340                 list_del(&data->list);
3341                 kfree(data);
3342         }
3343 }
3344
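/* Store P-192 out-of-band pairing data for a remote device. The P-256
 * values are cleared; hci_add_remote_oob_ext_data() below handles the
 * extended (Secure Connections) variant that carries both.
 */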
3345 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3346                             u8 *hash, u8 *randomizer)
3347 {
3348         struct oob_data *data;
3349
3350         data = hci_find_remote_oob_data(hdev, bdaddr);
3351         if (!data) {
3352                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3353                 if (!data)
3354                         return -ENOMEM;
3355
3356                 bacpy(&data->bdaddr, bdaddr);
3357                 list_add(&data->list, &hdev->remote_oob_data);
3358         }
3359
3360         memcpy(data->hash192, hash, sizeof(data->hash192));
3361         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3362
3363         memset(data->hash256, 0, sizeof(data->hash256));
3364         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3365
3366         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3367
3368         return 0;
3369 }
3370
3371 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3372                                 u8 *hash192, u8 *randomizer192,
3373                                 u8 *hash256, u8 *randomizer256)
3374 {
3375         struct oob_data *data;
3376
3377         data = hci_find_remote_oob_data(hdev, bdaddr);
3378         if (!data) {
3379                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3380                 if (!data)
3381                         return -ENOMEM;
3382
3383                 bacpy(&data->bdaddr, bdaddr);
3384                 list_add(&data->list, &hdev->remote_oob_data);
3385         }
3386
3387         memcpy(data->hash192, hash192, sizeof(data->hash192));
3388         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3389
3390         memcpy(data->hash256, hash256, sizeof(data->hash256));
3391         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3392
3393         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3394
3395         return 0;
3396 }
3397
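/* Generic helpers for the bdaddr_t lists kept on hdev, such as the
 * blacklist, whitelist and LE white list initialized in
 * hci_alloc_dev() below.
 */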
3398 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3399                                          bdaddr_t *bdaddr, u8 type)
3400 {
3401         struct bdaddr_list *b;
3402
3403         list_for_each_entry(b, bdaddr_list, list) {
3404                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3405                         return b;
3406         }
3407
3408         return NULL;
3409 }
3410
3411 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3412 {
3413         struct list_head *p, *n;
3414
3415         list_for_each_safe(p, n, bdaddr_list) {
3416                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3417
3418                 list_del(p);
3419                 kfree(b);
3420         }
3421 }
3422
3423 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3424 {
3425         struct bdaddr_list *entry;
3426
3427         if (!bacmp(bdaddr, BDADDR_ANY))
3428                 return -EBADF;
3429
3430         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3431                 return -EEXIST;
3432
3433         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3434         if (!entry)
3435                 return -ENOMEM;
3436
3437         bacpy(&entry->bdaddr, bdaddr);
3438         entry->bdaddr_type = type;
3439
3440         list_add(&entry->list, list);
3441
3442         return 0;
3443 }
3444
3445 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3446 {
3447         struct bdaddr_list *entry;
3448
3449         if (!bacmp(bdaddr, BDADDR_ANY)) {
3450                 hci_bdaddr_list_clear(list);
3451                 return 0;
3452         }
3453
3454         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3455         if (!entry)
3456                 return -ENOENT;
3457
3458         list_del(&entry->list);
3459         kfree(entry);
3460
3461         return 0;
3462 }
3463
3464 /* This function requires the caller holds hdev->lock */
3465 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3466                                                bdaddr_t *addr, u8 addr_type)
3467 {
3468         struct hci_conn_params *params;
3469
3470         /* The conn params list only contains identity addresses */
3471         if (!hci_is_identity_address(addr, addr_type))
3472                 return NULL;
3473
3474         list_for_each_entry(params, &hdev->le_conn_params, list) {
3475                 if (bacmp(&params->addr, addr) == 0 &&
3476                     params->addr_type == addr_type) {
3477                         return params;
3478                 }
3479         }
3480
3481         return NULL;
3482 }
3483
3484 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3485 {
3486         struct hci_conn *conn;
3487
3488         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3489         if (!conn)
3490                 return false;
3491
3492         if (conn->dst_type != type)
3493                 return false;
3494
3495         if (conn->state != BT_CONNECTED)
3496                 return false;
3497
3498         return true;
3499 }
3500
3501 /* This function requires the caller holds hdev->lock */
3502 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3503                                                   bdaddr_t *addr, u8 addr_type)
3504 {
3505         struct hci_conn_params *param;
3506
3507         /* The list only contains identity addresses */
3508         if (!hci_is_identity_address(addr, addr_type))
3509                 return NULL;
3510
3511         list_for_each_entry(param, list, action) {
3512                 if (bacmp(&param->addr, addr) == 0 &&
3513                     param->addr_type == addr_type)
3514                         return param;
3515         }
3516
3517         return NULL;
3518 }
3519
3520 /* This function requires the caller holds hdev->lock */
3521 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3522                                             bdaddr_t *addr, u8 addr_type)
3523 {
3524         struct hci_conn_params *params;
3525
3526         if (!hci_is_identity_address(addr, addr_type))
3527                 return NULL;
3528
3529         params = hci_conn_params_lookup(hdev, addr, addr_type);
3530         if (params)
3531                 return params;
3532
3533         params = kzalloc(sizeof(*params), GFP_KERNEL);
3534         if (!params) {
3535                 BT_ERR("Out of memory");
3536                 return NULL;
3537         }
3538
3539         bacpy(&params->addr, addr);
3540         params->addr_type = addr_type;
3541
3542         list_add(&params->list, &hdev->le_conn_params);
3543         INIT_LIST_HEAD(&params->action);
3544
3545         params->conn_min_interval = hdev->le_conn_min_interval;
3546         params->conn_max_interval = hdev->le_conn_max_interval;
3547         params->conn_latency = hdev->le_conn_latency;
3548         params->supervision_timeout = hdev->le_supv_timeout;
3549         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3550
3551         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3552
3553         return params;
3554 }
3555
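/* Set the auto-connect policy for an LE device, creating connection
 * parameters for it if necessary. REPORT queues the device on
 * pend_le_reports and ALWAYS on pend_le_conns (unless it is already
 * connected); DISABLED and LINK_LOSS only trigger a background scan
 * update.
 */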
3556 /* This function requires the caller holds hdev->lock */
3557 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3558                         u8 auto_connect)
3559 {
3560         struct hci_conn_params *params;
3561
3562         params = hci_conn_params_add(hdev, addr, addr_type);
3563         if (!params)
3564                 return -EIO;
3565
3566         if (params->auto_connect == auto_connect)
3567                 return 0;
3568
3569         list_del_init(&params->action);
3570
3571         switch (auto_connect) {
3572         case HCI_AUTO_CONN_DISABLED:
3573         case HCI_AUTO_CONN_LINK_LOSS:
3574                 hci_update_background_scan(hdev);
3575                 break;
3576         case HCI_AUTO_CONN_REPORT:
3577                 list_add(&params->action, &hdev->pend_le_reports);
3578                 hci_update_background_scan(hdev);
3579                 break;
3580         case HCI_AUTO_CONN_ALWAYS:
3581                 if (!is_connected(hdev, addr, addr_type)) {
3582                         list_add(&params->action, &hdev->pend_le_conns);
3583                         hci_update_background_scan(hdev);
3584                 }
3585                 break;
3586         }
3587
3588         params->auto_connect = auto_connect;
3589
3590         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3591                auto_connect);
3592
3593         return 0;
3594 }
3595
3596 /* This function requires the caller holds hdev->lock */
3597 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3598 {
3599         struct hci_conn_params *params;
3600
3601         params = hci_conn_params_lookup(hdev, addr, addr_type);
3602         if (!params)
3603                 return;
3604
3605         list_del(&params->action);
3606         list_del(&params->list);
3607         kfree(params);
3608
3609         hci_update_background_scan(hdev);
3610
3611         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3612 }
3613
3614 /* This function requires the caller holds hdev->lock */
3615 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3616 {
3617         struct hci_conn_params *params, *tmp;
3618
3619         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3620                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3621                         continue;
3622                 list_del(&params->list);
3623                 kfree(params);
3624         }
3625
3626         BT_DBG("All disabled LE connection parameters were removed");
3627 }
3628
3629 /* This function requires the caller holds hdev->lock */
3630 void hci_conn_params_clear_all(struct hci_dev *hdev)
3631 {
3632         struct hci_conn_params *params, *tmp;
3633
3634         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3635                 list_del(&params->action);
3636                 list_del(&params->list);
3637                 kfree(params);
3638         }
3639
3640         hci_update_background_scan(hdev);
3641
3642         BT_DBG("All LE connection parameters were removed");
3643 }
3644
3645 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3646 {
3647         if (status) {
3648                 BT_ERR("Failed to start inquiry: status %d", status);
3649
3650                 hci_dev_lock(hdev);
3651                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3652                 hci_dev_unlock(hdev);
3653                 return;
3654         }
3655 }
3656
3657 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3658 {
3659         /* General inquiry access code (GIAC) */
3660         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3661         struct hci_request req;
3662         struct hci_cp_inquiry cp;
3663         int err;
3664
3665         if (status) {
3666                 BT_ERR("Failed to disable LE scanning: status %d", status);
3667                 return;
3668         }
3669
3670         switch (hdev->discovery.type) {
3671         case DISCOV_TYPE_LE:
3672                 hci_dev_lock(hdev);
3673                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3674                 hci_dev_unlock(hdev);
3675                 break;
3676
3677         case DISCOV_TYPE_INTERLEAVED:
3678                 hci_req_init(&req, hdev);
3679
3680                 memset(&cp, 0, sizeof(cp));
3681                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3682                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3683                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3684
3685                 hci_dev_lock(hdev);
3686
3687                 hci_inquiry_cache_flush(hdev);
3688
3689                 err = hci_req_run(&req, inquiry_complete);
3690                 if (err) {
3691                         BT_ERR("Inquiry request failed: err %d", err);
3692                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3693                 }
3694
3695                 hci_dev_unlock(hdev);
3696                 break;
3697         }
3698 }
3699
3700 static void le_scan_disable_work(struct work_struct *work)
3701 {
3702         struct hci_dev *hdev = container_of(work, struct hci_dev,
3703                                             le_scan_disable.work);
3704         struct hci_request req;
3705         int err;
3706
3707         BT_DBG("%s", hdev->name);
3708
3709         hci_req_init(&req, hdev);
3710
3711         hci_req_add_le_scan_disable(&req);
3712
3713         err = hci_req_run(&req, le_scan_disable_work_complete);
3714         if (err)
3715                 BT_ERR("Disable LE scanning request failed: err %d", err);
3716 }
3717
3718 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3719 {
3720         struct hci_dev *hdev = req->hdev;
3721
3722         /* If we're advertising or initiating an LE connection we can't
3723          * go ahead and change the random address at this time. This is
3724          * because the eventual initiator address used for the
3725          * subsequently created connection will be undefined (some
3726          * controllers use the new address and others the one we had
3727          * when the operation started).
3728          *
3729          * In this kind of scenario skip the update and let the random
3730          * address be updated at the next cycle.
3731          */
3732         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3733             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3734                 BT_DBG("Deferring random address update");
3735                 return;
3736         }
3737
3738         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3739 }
3740
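/* Pick the own address type for a request (queueing a random address
 * update if needed), in decreasing order of preference: a resolvable
 * private address when privacy is enabled, a non-resolvable private
 * address when privacy is required anyway, the static random address
 * when forced or when no public address exists, and the public
 * address otherwise.
 */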
3741 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3742                               u8 *own_addr_type)
3743 {
3744         struct hci_dev *hdev = req->hdev;
3745         int err;
3746
3747         /* If privacy is enabled use a resolvable private address. If
3748          * the current RPA has expired or something other than the
3749          * current RPA is in use, then generate a new one.
3750          */
3751         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3752                 int to;
3753
3754                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3755
3756                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3757                     !bacmp(&hdev->random_addr, &hdev->rpa))
3758                         return 0;
3759
3760                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3761                 if (err < 0) {
3762                         BT_ERR("%s failed to generate new RPA", hdev->name);
3763                         return err;
3764                 }
3765
3766                 set_random_addr(req, &hdev->rpa);
3767
3768                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3769                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3770
3771                 return 0;
3772         }
3773
3774         /* If privacy is required but no resolvable private address is
3775          * available, use a non-resolvable private address. This is useful
3776          * for active scanning and non-connectable advertising.
3777          */
3778         if (require_privacy) {
3779                 bdaddr_t urpa;
3780
3781                 get_random_bytes(&urpa, 6);
3782                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3783
3784                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3785                 set_random_addr(req, &urpa);
3786                 return 0;
3787         }
3788
3789         /* If forcing static address is in use or there is no public
3790          * address, use the static address as the random address (but skip
3791          * the HCI command if the current random address is already the
3792          * static one).
3793          */
3794         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3795             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3796                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3797                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3798                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3799                                     &hdev->static_addr);
3800                 return 0;
3801         }
3802
3803         /* Neither privacy nor static address is being used so use a
3804          * public address.
3805          */
3806         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3807
3808         return 0;
3809 }
3810
3811 /* Copy the Identity Address of the controller.
3812  *
3813  * If the controller has a public BD_ADDR, then by default use that one.
3814  * If this is an LE-only controller without a public address, default to
3815  * the static random address.
3816  *
3817  * For debugging purposes it is possible to force controllers with a
3818  * public address to use the static random address instead.
3819  */
3820 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3821                                u8 *bdaddr_type)
3822 {
3823         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3824             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3825                 bacpy(bdaddr, &hdev->static_addr);
3826                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3827         } else {
3828                 bacpy(bdaddr, &hdev->bdaddr);
3829                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3830         }
3831 }
3832
3833 /* Alloc HCI device */
3834 struct hci_dev *hci_alloc_dev(void)
3835 {
3836         struct hci_dev *hdev;
3837
3838         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3839         if (!hdev)
3840                 return NULL;
3841
3842         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3843         hdev->esco_type = (ESCO_HV1);
3844         hdev->link_mode = (HCI_LM_ACCEPT);
3845         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3846         hdev->io_capability = 0x03;     /* No Input No Output */
3847         hdev->manufacturer = 0xffff;    /* Default to internal use */
3848         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3849         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3850
3851         hdev->sniff_max_interval = 800;
3852         hdev->sniff_min_interval = 80;
3853
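        /* The LE defaults below use the units mandated by the Core
         * Specification: scan interval/window in 0.625 ms units
         * (0x0060 = 60 ms, 0x0030 = 30 ms), connection intervals in
         * 1.25 ms units (0x0028 = 50 ms, 0x0038 = 70 ms) and the
         * supervision timeout in 10 ms units (0x002a = 420 ms).
         */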
3854         hdev->le_adv_channel_map = 0x07;
3855         hdev->le_scan_interval = 0x0060;
3856         hdev->le_scan_window = 0x0030;
3857         hdev->le_conn_min_interval = 0x0028;
3858         hdev->le_conn_max_interval = 0x0038;
3859         hdev->le_conn_latency = 0x0000;
3860         hdev->le_supv_timeout = 0x002a;
3861
3862         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3863         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3864         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3865         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3866
3867         mutex_init(&hdev->lock);
3868         mutex_init(&hdev->req_lock);
3869
3870         INIT_LIST_HEAD(&hdev->mgmt_pending);
3871         INIT_LIST_HEAD(&hdev->blacklist);
3872         INIT_LIST_HEAD(&hdev->whitelist);
3873         INIT_LIST_HEAD(&hdev->uuids);
3874         INIT_LIST_HEAD(&hdev->link_keys);
3875         INIT_LIST_HEAD(&hdev->long_term_keys);
3876         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3877         INIT_LIST_HEAD(&hdev->remote_oob_data);
3878         INIT_LIST_HEAD(&hdev->le_white_list);
3879         INIT_LIST_HEAD(&hdev->le_conn_params);
3880         INIT_LIST_HEAD(&hdev->pend_le_conns);
3881         INIT_LIST_HEAD(&hdev->pend_le_reports);
3882         INIT_LIST_HEAD(&hdev->conn_hash.list);
3883
3884         INIT_WORK(&hdev->rx_work, hci_rx_work);
3885         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3886         INIT_WORK(&hdev->tx_work, hci_tx_work);
3887         INIT_WORK(&hdev->power_on, hci_power_on);
3888
3889         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3890         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3891         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3892
3893         skb_queue_head_init(&hdev->rx_q);
3894         skb_queue_head_init(&hdev->cmd_q);
3895         skb_queue_head_init(&hdev->raw_q);
3896
3897         init_waitqueue_head(&hdev->req_wait_q);
3898
3899         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3900
3901         hci_init_sysfs(hdev);
3902         discovery_init(hdev);
3903
3904         return hdev;
3905 }
3906 EXPORT_SYMBOL(hci_alloc_dev);
3907
3908 /* Free HCI device */
3909 void hci_free_dev(struct hci_dev *hdev)
3910 {
3911         /* will free via device release */
3912         put_device(&hdev->dev);
3913 }
3914 EXPORT_SYMBOL(hci_free_dev);
3915
3916 /* Register HCI device */
3917 int hci_register_dev(struct hci_dev *hdev)
3918 {
3919         int id, error;
3920
3921         if (!hdev->open || !hdev->close || !hdev->send)
3922                 return -EINVAL;
3923
3924         /* Do not allow HCI_AMP devices to register at index 0,
3925          * so the index can be used as the AMP controller ID.
3926          */
3927         switch (hdev->dev_type) {
3928         case HCI_BREDR:
3929                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3930                 break;
3931         case HCI_AMP:
3932                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3933                 break;
3934         default:
3935                 return -EINVAL;
3936         }
3937
3938         if (id < 0)
3939                 return id;
3940
3941         sprintf(hdev->name, "hci%d", id);
3942         hdev->id = id;
3943
3944         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3945
3946         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3947                                           WQ_MEM_RECLAIM, 1, hdev->name);
3948         if (!hdev->workqueue) {
3949                 error = -ENOMEM;
3950                 goto err;
3951         }
3952
3953         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3954                                               WQ_MEM_RECLAIM, 1, hdev->name);
3955         if (!hdev->req_workqueue) {
3956                 destroy_workqueue(hdev->workqueue);
3957                 error = -ENOMEM;
3958                 goto err;
3959         }
3960
3961         if (!IS_ERR_OR_NULL(bt_debugfs))
3962                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3963
3964         dev_set_name(&hdev->dev, "%s", hdev->name);
3965
3966         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3967                                                CRYPTO_ALG_ASYNC);
3968         if (IS_ERR(hdev->tfm_aes)) {
3969                 BT_ERR("Unable to create crypto context");
3970                 error = PTR_ERR(hdev->tfm_aes);
3971                 hdev->tfm_aes = NULL;
3972                 goto err_wqueue;
3973         }
3974
3975         error = device_add(&hdev->dev);
3976         if (error < 0)
3977                 goto err_tfm;
3978
3979         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3980                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3981                                     hdev);
3982         if (hdev->rfkill) {
3983                 if (rfkill_register(hdev->rfkill) < 0) {
3984                         rfkill_destroy(hdev->rfkill);
3985                         hdev->rfkill = NULL;
3986                 }
3987         }
3988
3989         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3990                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3991
3992         set_bit(HCI_SETUP, &hdev->dev_flags);
3993         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3994
3995         if (hdev->dev_type == HCI_BREDR) {
3996                 /* Assume BR/EDR support until proven otherwise (such as
3997                  * through reading supported features during init).
3998                  */
3999                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4000         }
4001
4002         write_lock(&hci_dev_list_lock);
4003         list_add(&hdev->list, &hci_dev_list);
4004         write_unlock(&hci_dev_list_lock);
4005
4006         /* Devices that are marked for raw-only usage are unconfigured
4007          * and should not be included in normal operation.
4008          */
4009         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4010                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4011
4012         hci_notify(hdev, HCI_DEV_REG);
4013         hci_dev_hold(hdev);
4014
4015         queue_work(hdev->req_workqueue, &hdev->power_on);
4016
4017         return id;
4018
4019 err_tfm:
4020         crypto_free_blkcipher(hdev->tfm_aes);
4021 err_wqueue:
4022         destroy_workqueue(hdev->workqueue);
4023         destroy_workqueue(hdev->req_workqueue);
4024 err:
4025         ida_simple_remove(&hci_index_ida, hdev->id);
4026
4027         return error;
4028 }
4029 EXPORT_SYMBOL(hci_register_dev);
4030
4031 /* Unregister HCI device */
4032 void hci_unregister_dev(struct hci_dev *hdev)
4033 {
4034         int i, id;
4035
4036         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4037
4038         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4039
4040         id = hdev->id;
4041
4042         write_lock(&hci_dev_list_lock);
4043         list_del(&hdev->list);
4044         write_unlock(&hci_dev_list_lock);
4045
4046         hci_dev_do_close(hdev);
4047
4048         for (i = 0; i < NUM_REASSEMBLY; i++)
4049                 kfree_skb(hdev->reassembly[i]);
4050
4051         cancel_work_sync(&hdev->power_on);
4052
4053         if (!test_bit(HCI_INIT, &hdev->flags) &&
4054             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4055             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4056                 hci_dev_lock(hdev);
4057                 mgmt_index_removed(hdev);
4058                 hci_dev_unlock(hdev);
4059         }
4060
4061         /* mgmt_index_removed should take care of emptying the
4062          * pending list. */
4063         BUG_ON(!list_empty(&hdev->mgmt_pending));
4064
4065         hci_notify(hdev, HCI_DEV_UNREG);
4066
4067         if (hdev->rfkill) {
4068                 rfkill_unregister(hdev->rfkill);
4069                 rfkill_destroy(hdev->rfkill);
4070         }
4071
4072         if (hdev->tfm_aes)
4073                 crypto_free_blkcipher(hdev->tfm_aes);
4074
4075         device_del(&hdev->dev);
4076
4077         debugfs_remove_recursive(hdev->debugfs);
4078
4079         destroy_workqueue(hdev->workqueue);
4080         destroy_workqueue(hdev->req_workqueue);
4081
4082         hci_dev_lock(hdev);
4083         hci_bdaddr_list_clear(&hdev->blacklist);
4084         hci_bdaddr_list_clear(&hdev->whitelist);
4085         hci_uuids_clear(hdev);
4086         hci_link_keys_clear(hdev);
4087         hci_smp_ltks_clear(hdev);
4088         hci_smp_irks_clear(hdev);
4089         hci_remote_oob_data_clear(hdev);
4090         hci_bdaddr_list_clear(&hdev->le_white_list);
4091         hci_conn_params_clear_all(hdev);
4092         hci_dev_unlock(hdev);
4093
4094         hci_dev_put(hdev);
4095
4096         ida_simple_remove(&hci_index_ida, id);
4097 }
4098 EXPORT_SYMBOL(hci_unregister_dev);
4099
4100 /* Suspend HCI device */
4101 int hci_suspend_dev(struct hci_dev *hdev)
4102 {
4103         hci_notify(hdev, HCI_DEV_SUSPEND);
4104         return 0;
4105 }
4106 EXPORT_SYMBOL(hci_suspend_dev);
4107
4108 /* Resume HCI device */
4109 int hci_resume_dev(struct hci_dev *hdev)
4110 {
4111         hci_notify(hdev, HCI_DEV_RESUME);
4112         return 0;
4113 }
4114 EXPORT_SYMBOL(hci_resume_dev);
4115
4116 /* Receive frame from HCI drivers */
4117 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4118 {
4119         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4120                       !test_bit(HCI_INIT, &hdev->flags))) {
4121                 kfree_skb(skb);
4122                 return -ENXIO;
4123         }
4124
4125         /* Incoming skb */
4126         bt_cb(skb)->incoming = 1;
4127
4128         /* Time stamp */
4129         __net_timestamp(skb);
4130
4131         skb_queue_tail(&hdev->rx_q, skb);
4132         queue_work(hdev->workqueue, &hdev->rx_work);
4133
4134         return 0;
4135 }
4136 EXPORT_SYMBOL(hci_recv_frame);
4137
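/* Append @count bytes of @data to the reassembly buffer selected by
 * @index, allocating it on first use. Complete frames are passed on to
 * hci_recv_frame(). Returns how many of the @count bytes were not
 * consumed, or a negative error.
 */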
4138 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4139                           int count, __u8 index)
4140 {
4141         int len = 0;
4142         int hlen = 0;
4143         int remain = count;
4144         struct sk_buff *skb;
4145         struct bt_skb_cb *scb;
4146
4147         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4148             index >= NUM_REASSEMBLY)
4149                 return -EILSEQ;
4150
4151         skb = hdev->reassembly[index];
4152
4153         if (!skb) {
4154                 switch (type) {
4155                 case HCI_ACLDATA_PKT:
4156                         len = HCI_MAX_FRAME_SIZE;
4157                         hlen = HCI_ACL_HDR_SIZE;
4158                         break;
4159                 case HCI_EVENT_PKT:
4160                         len = HCI_MAX_EVENT_SIZE;
4161                         hlen = HCI_EVENT_HDR_SIZE;
4162                         break;
4163                 case HCI_SCODATA_PKT:
4164                         len = HCI_MAX_SCO_SIZE;
4165                         hlen = HCI_SCO_HDR_SIZE;
4166                         break;
4167                 }
4168
4169                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4170                 if (!skb)
4171                         return -ENOMEM;
4172
4173                 scb = (void *) skb->cb;
4174                 scb->expect = hlen;
4175                 scb->pkt_type = type;
4176
4177                 hdev->reassembly[index] = skb;
4178         }
4179
4180         while (count) {
4181                 scb = (void *) skb->cb;
4182                 len = min_t(uint, scb->expect, count);
4183
4184                 memcpy(skb_put(skb, len), data, len);
4185
4186                 count -= len;
4187                 data += len;
4188                 scb->expect -= len;
4189                 remain = count;
4190
4191                 switch (type) {
4192                 case HCI_EVENT_PKT:
4193                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4194                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4195                                 scb->expect = h->plen;
4196
4197                                 if (skb_tailroom(skb) < scb->expect) {
4198                                         kfree_skb(skb);
4199                                         hdev->reassembly[index] = NULL;
4200                                         return -ENOMEM;
4201                                 }
4202                         }
4203                         break;
4204
4205                 case HCI_ACLDATA_PKT:
4206                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4207                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4208                                 scb->expect = __le16_to_cpu(h->dlen);
4209
4210                                 if (skb_tailroom(skb) < scb->expect) {
4211                                         kfree_skb(skb);
4212                                         hdev->reassembly[index] = NULL;
4213                                         return -ENOMEM;
4214                                 }
4215                         }
4216                         break;
4217
4218                 case HCI_SCODATA_PKT:
4219                         if (skb->len == HCI_SCO_HDR_SIZE) {
4220                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4221                                 scb->expect = h->dlen;
4222
4223                                 if (skb_tailroom(skb) < scb->expect) {
4224                                         kfree_skb(skb);
4225                                         hdev->reassembly[index] = NULL;
4226                                         return -ENOMEM;
4227                                 }
4228                         }
4229                         break;
4230                 }
4231
4232                 if (scb->expect == 0) {
4233                         /* Complete frame */
4234
4235                         bt_cb(skb)->pkt_type = type;
4236                         hci_recv_frame(hdev, skb);
4237
4238                         hdev->reassembly[index] = NULL;
4239                         return remain;
4240                 }
4241         }
4242
4243         return remain;
4244 }
4245
4246 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4247 {
4248         int rem = 0;
4249
4250         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4251                 return -EILSEQ;
4252
4253         while (count) {
4254                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4255                 if (rem < 0)
4256                         return rem;
4257
4258                 data += (count - rem);
4259                 count = rem;
4260         }
4261
4262         return rem;
4263 }
4264 EXPORT_SYMBOL(hci_recv_fragment);
4265
4266 #define STREAM_REASSEMBLY 0
4267
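/* Byte-stream variant of hci_recv_fragment() for transports where each
 * frame is prefixed with a packet type octet (UART/H4 style): the type
 * is peeled off the stream and a single shared reassembly slot is used.
 */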
4268 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4269 {
4270         int type;
4271         int rem = 0;
4272
4273         while (count) {
4274                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4275
4276                 if (!skb) {
4277                         struct { char type; } *pkt;
4278
4279                         /* Start of the frame */
4280                         pkt = data;
4281                         type = pkt->type;
4282
4283                         data++;
4284                         count--;
4285                 } else
4286                         type = bt_cb(skb)->pkt_type;
4287
4288                 rem = hci_reassembly(hdev, type, data, count,
4289                                      STREAM_REASSEMBLY);
4290                 if (rem < 0)
4291                         return rem;
4292
4293                 data += (count - rem);
4294                 count = rem;
4295         }
4296
4297         return rem;
4298 }
4299 EXPORT_SYMBOL(hci_recv_stream_fragment);
4300
4301 /* ---- Interface to upper protocols ---- */
4302
4303 int hci_register_cb(struct hci_cb *cb)
4304 {
4305         BT_DBG("%p name %s", cb, cb->name);
4306
4307         write_lock(&hci_cb_list_lock);
4308         list_add(&cb->list, &hci_cb_list);
4309         write_unlock(&hci_cb_list_lock);
4310
4311         return 0;
4312 }
4313 EXPORT_SYMBOL(hci_register_cb);
4314
4315 int hci_unregister_cb(struct hci_cb *cb)
4316 {
4317         BT_DBG("%p name %s", cb, cb->name);
4318
4319         write_lock(&hci_cb_list_lock);
4320         list_del(&cb->list);
4321         write_unlock(&hci_cb_list_lock);
4322
4323         return 0;
4324 }
4325 EXPORT_SYMBOL(hci_unregister_cb);
4326
4327 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4328 {
4329         int err;
4330
4331         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4332
4333         /* Time stamp */
4334         __net_timestamp(skb);
4335
4336         /* Send copy to monitor */
4337         hci_send_to_monitor(hdev, skb);
4338
4339         if (atomic_read(&hdev->promisc)) {
4340                 /* Send copy to the sockets */
4341                 hci_send_to_sock(hdev, skb);
4342         }
4343
4344         /* Get rid of skb owner prior to sending to the driver. */
4345         skb_orphan(skb);
4346
4347         err = hdev->send(hdev, skb);
4348         if (err < 0) {
4349                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4350                 kfree_skb(skb);
4351         }
4352 }
4353
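/* Asynchronous HCI request helpers. Typical usage (see
 * le_scan_disable_work() above for a live example):
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 *      err = hci_req_run(&req, complete_cb);
 *
 * hci_req_run() returns -ENODATA for empty requests and req->err when
 * building the request failed; the completion callback runs once the
 * last queued command completes.
 */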
4354 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4355 {
4356         skb_queue_head_init(&req->cmd_q);
4357         req->hdev = hdev;
4358         req->err = 0;
4359 }
4360
4361 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4362 {
4363         struct hci_dev *hdev = req->hdev;
4364         struct sk_buff *skb;
4365         unsigned long flags;
4366
4367         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4368
4369         /* If an error occurred during request building, remove all HCI
4370          * commands queued on the HCI request queue.
4371          */
4372         if (req->err) {
4373                 skb_queue_purge(&req->cmd_q);
4374                 return req->err;
4375         }
4376
4377         /* Do not allow empty requests */
4378         if (skb_queue_empty(&req->cmd_q))
4379                 return -ENODATA;
4380
4381         skb = skb_peek_tail(&req->cmd_q);
4382         bt_cb(skb)->req.complete = complete;
4383
4384         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4385         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4386         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4387
4388         queue_work(hdev->workqueue, &hdev->cmd_work);
4389
4390         return 0;
4391 }
4392
4393 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4394                                        u32 plen, const void *param)
4395 {
4396         int len = HCI_COMMAND_HDR_SIZE + plen;
4397         struct hci_command_hdr *hdr;
4398         struct sk_buff *skb;
4399
4400         skb = bt_skb_alloc(len, GFP_ATOMIC);
4401         if (!skb)
4402                 return NULL;
4403
4404         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4405         hdr->opcode = cpu_to_le16(opcode);
4406         hdr->plen   = plen;
4407
4408         if (plen)
4409                 memcpy(skb_put(skb, plen), param, plen);
4410
4411         BT_DBG("skb len %d", skb->len);
4412
4413         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4414
4415         return skb;
4416 }
4417
4418 /* Send HCI command */
4419 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4420                  const void *param)
4421 {
4422         struct sk_buff *skb;
4423
4424         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4425
4426         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4427         if (!skb) {
4428                 BT_ERR("%s no memory for command", hdev->name);
4429                 return -ENOMEM;
4430         }
4431
4432         /* Stand-alone HCI commands must be flagged as
4433          * single-command requests.
4434          */
4435         bt_cb(skb)->req.start = true;
4436
4437         skb_queue_tail(&hdev->cmd_q, skb);
4438         queue_work(hdev->workqueue, &hdev->cmd_work);
4439
4440         return 0;
4441 }
4442
4443 /* Queue a command to an asynchronous HCI request */
4444 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4445                     const void *param, u8 event)
4446 {
4447         struct hci_dev *hdev = req->hdev;
4448         struct sk_buff *skb;
4449
4450         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4451
4452         /* If an error occurred during request building, there is no point in
4453          * queueing the HCI command. We can simply return.
4454          */
4455         if (req->err)
4456                 return;
4457
4458         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4459         if (!skb) {
4460                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4461                        hdev->name, opcode);
4462                 req->err = -ENOMEM;
4463                 return;
4464         }
4465
4466         if (skb_queue_empty(&req->cmd_q))
4467                 bt_cb(skb)->req.start = true;
4468
4469         bt_cb(skb)->req.event = event;
4470
4471         skb_queue_tail(&req->cmd_q, skb);
4472 }
4473
4474 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4475                  const void *param)
4476 {
4477         hci_req_add_ev(req, opcode, plen, param, 0);
4478 }
4479
4480 /* Get data from the previously sent command */
4481 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4482 {
4483         struct hci_command_hdr *hdr;
4484
4485         if (!hdev->sent_cmd)
4486                 return NULL;
4487
4488         hdr = (void *) hdev->sent_cmd->data;
4489
4490         if (hdr->opcode != cpu_to_le16(opcode))
4491                 return NULL;
4492
4493         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4494
4495         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4496 }
4497
4498 /* Send ACL data */
4499 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4500 {
4501         struct hci_acl_hdr *hdr;
4502         int len = skb->len;
4503
4504         skb_push(skb, HCI_ACL_HDR_SIZE);
4505         skb_reset_transport_header(skb);
4506         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4507         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4508         hdr->dlen   = cpu_to_le16(len);
4509 }
4510
4511 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4512                           struct sk_buff *skb, __u16 flags)
4513 {
4514         struct hci_conn *conn = chan->conn;
4515         struct hci_dev *hdev = conn->hdev;
4516         struct sk_buff *list;
4517
4518         skb->len = skb_headlen(skb);
4519         skb->data_len = 0;
4520
4521         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4522
4523         switch (hdev->dev_type) {
4524         case HCI_BREDR:
4525                 hci_add_acl_hdr(skb, conn->handle, flags);
4526                 break;
4527         case HCI_AMP:
4528                 hci_add_acl_hdr(skb, chan->handle, flags);
4529                 break;
4530         default:
4531                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4532                 return;
4533         }
4534
4535         list = skb_shinfo(skb)->frag_list;
4536         if (!list) {
4537                 /* Non-fragmented */
4538                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4539
4540                 skb_queue_tail(queue, skb);
4541         } else {
4542                 /* Fragmented */
4543                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4544
4545                 skb_shinfo(skb)->frag_list = NULL;
4546
4547                 /* Queue all fragments atomically */
4548                 spin_lock(&queue->lock);
4549
4550                 __skb_queue_tail(queue, skb);
4551
4552                 flags &= ~ACL_START;
4553                 flags |= ACL_CONT;
4554                 do {
4555                         skb = list; list = list->next;
4556
4557                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4558                         hci_add_acl_hdr(skb, conn->handle, flags);
4559
4560                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4561
4562                         __skb_queue_tail(queue, skb);
4563                 } while (list);
4564
4565                 spin_unlock(&queue->lock);
4566         }
4567 }
4568
4569 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4570 {
4571         struct hci_dev *hdev = chan->conn->hdev;
4572
4573         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4574
4575         hci_queue_acl(chan, &chan->data_q, skb, flags);
4576
4577         queue_work(hdev->workqueue, &hdev->tx_work);
4578 }
4579
4580 /* Send SCO data */
4581 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4582 {
4583         struct hci_dev *hdev = conn->hdev;
4584         struct hci_sco_hdr hdr;
4585
4586         BT_DBG("%s len %d", hdev->name, skb->len);
4587
4588         hdr.handle = cpu_to_le16(conn->handle);
4589         hdr.dlen   = skb->len;
4590
4591         skb_push(skb, HCI_SCO_HDR_SIZE);
4592         skb_reset_transport_header(skb);
4593         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4594
4595         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4596
4597         skb_queue_tail(&conn->data_q, skb);
4598         queue_work(hdev->workqueue, &hdev->tx_work);
4599 }
4600
4601 /* ---- HCI TX task (outgoing data) ---- */
4602
4603 /* HCI Connection scheduler */
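/* Pick, among the connections of the given @type that have queued
 * data, the one with the fewest packets in flight, and compute its
 * fair share of the available controller buffers in *quote.
 */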
4604 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4605                                      int *quote)
4606 {
4607         struct hci_conn_hash *h = &hdev->conn_hash;
4608         struct hci_conn *conn = NULL, *c;
4609         unsigned int num = 0, min = ~0;
4610
4611         /* We don't have to lock the device here. Connections are always
4612          * added and removed with the TX task disabled. */
4613
4614         rcu_read_lock();
4615
4616         list_for_each_entry_rcu(c, &h->list, list) {
4617                 if (c->type != type || skb_queue_empty(&c->data_q))
4618                         continue;
4619
4620                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4621                         continue;
4622
4623                 num++;
4624
4625                 if (c->sent < min) {
4626                         min  = c->sent;
4627                         conn = c;
4628                 }
4629
4630                 if (hci_conn_num(hdev, type) == num)
4631                         break;
4632         }
4633
4634         rcu_read_unlock();
4635
4636         if (conn) {
4637                 int cnt, q;
4638
4639                 switch (conn->type) {
4640                 case ACL_LINK:
4641                         cnt = hdev->acl_cnt;
4642                         break;
4643                 case SCO_LINK:
4644                 case ESCO_LINK:
4645                         cnt = hdev->sco_cnt;
4646                         break;
4647                 case LE_LINK:
4648                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4649                         break;
4650                 default:
4651                         cnt = 0;
4652                         BT_ERR("Unknown link type");
4653                 }
4654
4655                 q = cnt / num;
4656                 *quote = q ? q : 1;
4657         } else
4658                 *quote = 0;
4659
4660         BT_DBG("conn %p quote %d", conn, *quote);
4661         return conn;
4662 }
4663
4664 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4665 {
4666         struct hci_conn_hash *h = &hdev->conn_hash;
4667         struct hci_conn *c;
4668
4669         BT_ERR("%s link tx timeout", hdev->name);
4670
4671         rcu_read_lock();
4672
4673         /* Kill stalled connections */
4674         list_for_each_entry_rcu(c, &h->list, list) {
4675                 if (c->type == type && c->sent) {
4676                         BT_ERR("%s killing stalled connection %pMR",
4677                                hdev->name, &c->dst);
4678                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4679                 }
4680         }
4681
4682         rcu_read_unlock();
4683 }
4684
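/* Channel-level scheduler: consider only channels whose head skb is at
 * the highest pending priority for connections of @type, pick the one
 * on the connection with the fewest packets in flight, and compute its
 * buffer quota in *quote.
 */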
4685 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4686                                       int *quote)
4687 {
4688         struct hci_conn_hash *h = &hdev->conn_hash;
4689         struct hci_chan *chan = NULL;
4690         unsigned int num = 0, min = ~0, cur_prio = 0;
4691         struct hci_conn *conn;
4692         int cnt, q, conn_num = 0;
4693
4694         BT_DBG("%s", hdev->name);
4695
4696         rcu_read_lock();
4697
4698         list_for_each_entry_rcu(conn, &h->list, list) {
4699                 struct hci_chan *tmp;
4700
4701                 if (conn->type != type)
4702                         continue;
4703
4704                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4705                         continue;
4706
4707                 conn_num++;
4708
4709                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4710                         struct sk_buff *skb;
4711
4712                         if (skb_queue_empty(&tmp->data_q))
4713                                 continue;
4714
4715                         skb = skb_peek(&tmp->data_q);
4716                         if (skb->priority < cur_prio)
4717                                 continue;
4718
4719                         if (skb->priority > cur_prio) {
4720                                 num = 0;
4721                                 min = ~0;
4722                                 cur_prio = skb->priority;
4723                         }
4724
4725                         num++;
4726
4727                         if (conn->sent < min) {
4728                                 min  = conn->sent;
4729                                 chan = tmp;
4730                         }
4731                 }
4732
4733                 if (hci_conn_num(hdev, type) == conn_num)
4734                         break;
4735         }
4736
4737         rcu_read_unlock();
4738
4739         if (!chan)
4740                 return NULL;
4741
4742         switch (chan->conn->type) {
4743         case ACL_LINK:
4744                 cnt = hdev->acl_cnt;
4745                 break;
4746         case AMP_LINK:
4747                 cnt = hdev->block_cnt;
4748                 break;
4749         case SCO_LINK:
4750         case ESCO_LINK:
4751                 cnt = hdev->sco_cnt;
4752                 break;
4753         case LE_LINK:
4754                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4755                 break;
4756         default:
4757                 cnt = 0;
4758                 BT_ERR("Unknown link type");
4759         }
4760
4761         q = cnt / num;
4762         *quote = q ? q : 1;
4763         BT_DBG("chan %p quote %d", chan, *quote);
4764         return chan;
4765 }
4766
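/* After a scheduling round, promote the head skb of every channel that
 * did not get to send anything to (HCI_PRIO_MAX - 1), so that lower
 * priority channels cannot be starved indefinitely.
 */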
4767 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4768 {
4769         struct hci_conn_hash *h = &hdev->conn_hash;
4770         struct hci_conn *conn;
4771         int num = 0;
4772
4773         BT_DBG("%s", hdev->name);
4774
4775         rcu_read_lock();
4776
4777         list_for_each_entry_rcu(conn, &h->list, list) {
4778                 struct hci_chan *chan;
4779
4780                 if (conn->type != type)
4781                         continue;
4782
4783                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4784                         continue;
4785
4786                 num++;
4787
4788                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4789                         struct sk_buff *skb;
4790
4791                         if (chan->sent) {
4792                                 chan->sent = 0;
4793                                 continue;
4794                         }
4795
4796                         if (skb_queue_empty(&chan->data_q))
4797                                 continue;
4798
4799                         skb = skb_peek(&chan->data_q);
4800                         if (skb->priority >= HCI_PRIO_MAX - 1)
4801                                 continue;
4802
4803                         skb->priority = HCI_PRIO_MAX - 1;
4804
4805                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4806                                skb->priority);
4807                 }
4808
4809                 if (hci_conn_num(hdev, type) == num)
4810                         break;
4811         }
4812
4813         rcu_read_unlock();
4814
4815 }
4816
4817 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4818 {
4819         /* Calculate count of blocks used by this packet */
4820         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4821 }
4822
4823 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4824 {
4825         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4826                 /* ACL tx timeout must be longer than maximum
4827                  * link supervision timeout (40.9 seconds). */
4828                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4829                                        HCI_ACL_TX_TIMEOUT))
4830                         hci_link_tx_to(hdev, ACL_LINK);
4831         }
4832 }
4833
4834 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4835 {
4836         unsigned int cnt = hdev->acl_cnt;
4837         struct hci_chan *chan;
4838         struct sk_buff *skb;
4839         int quote;
4840
4841         __check_timeout(hdev, cnt);
4842
4843         while (hdev->acl_cnt &&
4844                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4845                 u32 priority = (skb_peek(&chan->data_q))->priority;
4846                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4847                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4848                                skb->len, skb->priority);
4849
4850                         /* Stop if priority has changed */
4851                         if (skb->priority < priority)
4852                                 break;
4853
4854                         skb = skb_dequeue(&chan->data_q);
4855
4856                         hci_conn_enter_active_mode(chan->conn,
4857                                                    bt_cb(skb)->force_active);
4858
4859                         hci_send_frame(hdev, skb);
4860                         hdev->acl_last_tx = jiffies;
4861
4862                         hdev->acl_cnt--;
4863                         chan->sent++;
4864                         chan->conn->sent++;
4865                 }
4866         }
4867
4868         if (cnt != hdev->acl_cnt)
4869                 hci_prio_recalculate(hdev, ACL_LINK);
4870 }
4871
4872 static void hci_sched_acl_blk(struct hci_dev *hdev)
4873 {
4874         unsigned int cnt = hdev->block_cnt;
4875         struct hci_chan *chan;
4876         struct sk_buff *skb;
4877         int quote;
4878         u8 type;
4879
4880         __check_timeout(hdev, cnt);
4881
4882         BT_DBG("%s", hdev->name);
4883
4884         if (hdev->dev_type == HCI_AMP)
4885                 type = AMP_LINK;
4886         else
4887                 type = ACL_LINK;
4888
4889         while (hdev->block_cnt > 0 &&
4890                (chan = hci_chan_sent(hdev, type, &quote))) {
4891                 u32 priority = (skb_peek(&chan->data_q))->priority;
4892                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4893                         int blocks;
4894
4895                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4896                                skb->len, skb->priority);
4897
4898                         /* Stop if priority has changed */
4899                         if (skb->priority < priority)
4900                                 break;
4901
4902                         skb = skb_dequeue(&chan->data_q);
4903
4904                         blocks = __get_blocks(hdev, skb);
4905                         if (blocks > hdev->block_cnt)
4906                                 return;
4907
4908                         hci_conn_enter_active_mode(chan->conn,
4909                                                    bt_cb(skb)->force_active);
4910
4911                         hci_send_frame(hdev, skb);
4912                         hdev->acl_last_tx = jiffies;
4913
4914                         hdev->block_cnt -= blocks;
4915                         quote -= blocks;
4916
4917                         chan->sent += blocks;
4918                         chan->conn->sent += blocks;
4919                 }
4920         }
4921
4922         if (cnt != hdev->block_cnt)
4923                 hci_prio_recalculate(hdev, type);
4924 }
4925
4926 static void hci_sched_acl(struct hci_dev *hdev)
4927 {
4928         BT_DBG("%s", hdev->name);
4929
4930         /* No ACL link over BR/EDR controller */
4931         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4932                 return;
4933
4934         /* No AMP link over AMP controller */
4935         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4936                 return;
4937
4938         switch (hdev->flow_ctl_mode) {
4939         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4940                 hci_sched_acl_pkt(hdev);
4941                 break;
4942
4943         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4944                 hci_sched_acl_blk(hdev);
4945                 break;
4946         }
4947 }
4948
4949 /* Schedule SCO */
4950 static void hci_sched_sco(struct hci_dev *hdev)
4951 {
4952         struct hci_conn *conn;
4953         struct sk_buff *skb;
4954         int quote;
4955
4956         BT_DBG("%s", hdev->name);
4957
4958         if (!hci_conn_num(hdev, SCO_LINK))
4959                 return;
4960
4961         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4962                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4963                         BT_DBG("skb %p len %d", skb, skb->len);
4964                         hci_send_frame(hdev, skb);
4965
4966                         conn->sent++;
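                        /* hci_low_sent() serves the connection with the
                         * lowest sent count first; reset the counter
                         * before it wraps.
                         */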
4967                         if (conn->sent == ~0)
4968                                 conn->sent = 0;
4969                 }
4970         }
4971 }
4972
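/* eSCO scheduling mirrors hci_sched_sco() above; both link types draw
 * from the same hdev->sco_cnt buffer quota.
 */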
4973 static void hci_sched_esco(struct hci_dev *hdev)
4974 {
4975         struct hci_conn *conn;
4976         struct sk_buff *skb;
4977         int quote;
4978
4979         BT_DBG("%s", hdev->name);
4980
4981         if (!hci_conn_num(hdev, ESCO_LINK))
4982                 return;
4983
4984         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4985                                                      &quote))) {
4986                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4987                         BT_DBG("skb %p len %d", skb, skb->len);
4988                         hci_send_frame(hdev, skb);
4989
4990                         conn->sent++;
4991                         if (conn->sent == ~0)
4992                                 conn->sent = 0;
4993                 }
4994         }
4995 }
4996
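/* LE scheduler. Controllers that report no dedicated LE buffers via
 * LE Read Buffer Size (le_pkts == 0) share the ACL buffer pool, which
 * is why the credit count is read from, and written back to, either
 * le_cnt or acl_cnt.
 */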
4997 static void hci_sched_le(struct hci_dev *hdev)
4998 {
4999         struct hci_chan *chan;
5000         struct sk_buff *skb;
5001         int quote, cnt, tmp;
5002
5003         BT_DBG("%s", hdev->name);
5004
5005         if (!hci_conn_num(hdev, LE_LINK))
5006                 return;
5007
5008         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5009                 /* LE tx timeout must be longer than maximum
5010                  * link supervision timeout (40.9 seconds) */
5011                 if (!hdev->le_cnt && hdev->le_pkts &&
5012                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5013                         hci_link_tx_to(hdev, LE_LINK);
5014         }
5015
5016         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5017         tmp = cnt;
5018         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5019                 u32 priority = (skb_peek(&chan->data_q))->priority;
5020                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5021                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5022                                skb->len, skb->priority);
5023
5024                         /* Stop if priority has changed */
5025                         if (skb->priority < priority)
5026                                 break;
5027
5028                         skb = skb_dequeue(&chan->data_q);
5029
5030                         hci_send_frame(hdev, skb);
5031                         hdev->le_last_tx = jiffies;
5032
5033                         cnt--;
5034                         chan->sent++;
5035                         chan->conn->sent++;
5036                 }
5037         }
5038
5039         if (hdev->le_pkts)
5040                 hdev->le_cnt = cnt;
5041         else
5042                 hdev->acl_cnt = cnt;
5043
5044         if (cnt != tmp)
5045                 hci_prio_recalculate(hdev, LE_LINK);
5046 }
5047
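/* TX work: run the per-link-type schedulers (skipped while the device
 * is in user channel mode) and then flush raw_q, which bypasses
 * scheduling entirely.
 */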
5048 static void hci_tx_work(struct work_struct *work)
5049 {
5050         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5051         struct sk_buff *skb;
5052
5053         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5054                hdev->sco_cnt, hdev->le_cnt);
5055
5056         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5057                 /* Schedule queues and send pending frames to the HCI driver */
5058                 hci_sched_acl(hdev);
5059                 hci_sched_sco(hdev);
5060                 hci_sched_esco(hdev);
5061                 hci_sched_le(hdev);
5062         }
5063
5064         /* Send next queued raw (unknown type) packet */
5065         while ((skb = skb_dequeue(&hdev->raw_q)))
5066                 hci_send_frame(hdev, skb);
5067 }
5068
5069 /* ----- HCI RX task (incoming data processing) ----- */
5070
5071 /* ACL data packet */
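/* The 16-bit handle field packs a 12-bit connection handle in the low
 * bits and the packet boundary/broadcast flags in the top four bits;
 * hci_handle() and hci_flags() below separate the two.
 */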
5072 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5073 {
5074         struct hci_acl_hdr *hdr = (void *) skb->data;
5075         struct hci_conn *conn;
5076         __u16 handle, flags;
5077
5078         skb_pull(skb, HCI_ACL_HDR_SIZE);
5079
5080         handle = __le16_to_cpu(hdr->handle);
5081         flags  = hci_flags(handle);
5082         handle = hci_handle(handle);
5083
5084         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5085                handle, flags);
5086
5087         hdev->stat.acl_rx++;
5088
5089         hci_dev_lock(hdev);
5090         conn = hci_conn_hash_lookup_handle(hdev, handle);
5091         hci_dev_unlock(hdev);
5092
5093         if (conn) {
5094                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5095
5096                 /* Send to upper protocol */
5097                 l2cap_recv_acldata(conn, skb, flags);
5098                 return;
5099         } else {
5100                 BT_ERR("%s ACL packet for unknown connection handle %d",
5101                        hdev->name, handle);
5102         }
5103
5104         kfree_skb(skb);
5105 }
5106
5107 /* SCO data packet */
5108 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5109 {
5110         struct hci_sco_hdr *hdr = (void *) skb->data;
5111         struct hci_conn *conn;
5112         __u16 handle;
5113
5114         skb_pull(skb, HCI_SCO_HDR_SIZE);
5115
5116         handle = __le16_to_cpu(hdr->handle);
5117
5118         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5119
5120         hdev->stat.sco_rx++;
5121
5122         hci_dev_lock(hdev);
5123         conn = hci_conn_hash_lookup_handle(hdev, handle);
5124         hci_dev_unlock(hdev);
5125
5126         if (conn) {
5127                 /* Send to upper protocol */
5128                 sco_recv_scodata(conn, skb);
5129                 return;
5130         } else {
5131                 BT_ERR("%s SCO packet for unknown connection handle %d",
5132                        hdev->name, handle);
5133         }
5134
5135         kfree_skb(skb);
5136 }
5137
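/* Commands submitted through the hci_request framework are queued with
 * bt_cb(skb)->req.start set on the first command of each request. If
 * the head of cmd_q starts a new request (or the queue is empty), every
 * command of the previous request has already been sent.
 */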
5138 static bool hci_req_is_complete(struct hci_dev *hdev)
5139 {
5140         struct sk_buff *skb;
5141
5142         skb = skb_peek(&hdev->cmd_q);
5143         if (!skb)
5144                 return true;
5145
5146         return bt_cb(skb)->req.start;
5147 }
5148
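/* Requeue a clone of the last sent command at the head of cmd_q. This
 * recovers from controllers that emit a spontaneous Reset complete
 * event which would otherwise leave the real pending command
 * unanswered; HCI_OP_RESET itself is never resent.
 */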
5149 static void hci_resend_last(struct hci_dev *hdev)
5150 {
5151         struct hci_command_hdr *sent;
5152         struct sk_buff *skb;
5153         u16 opcode;
5154
5155         if (!hdev->sent_cmd)
5156                 return;
5157
5158         sent = (void *) hdev->sent_cmd->data;
5159         opcode = __le16_to_cpu(sent->opcode);
5160         if (opcode == HCI_OP_RESET)
5161                 return;
5162
5163         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5164         if (!skb)
5165                 return;
5166
5167         skb_queue_head(&hdev->cmd_q, skb);
5168         queue_work(hdev->workqueue, &hdev->cmd_work);
5169 }
5170
5171 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5172 {
5173         hci_req_complete_t req_complete = NULL;
5174         struct sk_buff *skb;
5175         unsigned long flags;
5176
5177         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5178
5179         /* If the completed command doesn't match the last one that was
5180          * sent, we need to handle it specially.
5181          */
5182         if (!hci_sent_cmd_data(hdev, opcode)) {
5183                 /* Some CSR based controllers generate a spontaneous
5184                  * reset complete event during init and any pending
5185                  * command will never be completed. In such a case we
5186                  * need to resend whatever was the last sent
5187                  * command.
5188                  */
5189                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5190                         hci_resend_last(hdev);
5191
5192                 return;
5193         }
5194
5195         /* If the command succeeded and there are still more commands in
5196          * this request, the request is not yet complete.
5197          */
5198         if (!status && !hci_req_is_complete(hdev))
5199                 return;
5200
5201         /* If this was the last command in a request, the complete
5202          * callback would be found in hdev->sent_cmd instead of the
5203          * command queue (hdev->cmd_q).
5204          */
5205         if (hdev->sent_cmd) {
5206                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5207
5208                 if (req_complete) {
5209                         /* We must set the complete callback to NULL to
5210                          * avoid calling the callback more than once if
5211                          * this function gets called again.
5212                          */
5213                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5214
5215                         goto call_complete;
5216                 }
5217         }
5218
5219         /* Remove all pending commands belonging to this request */
5220         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5221         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5222                 if (bt_cb(skb)->req.start) {
5223                         __skb_queue_head(&hdev->cmd_q, skb);
5224                         break;
5225                 }
5226
5227                 req_complete = bt_cb(skb)->req.complete;
5228                 kfree_skb(skb);
5229         }
5230         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5231
5232 call_complete:
5233         if (req_complete)
5234                 req_complete(hdev, status);
5235 }
5236
5237 static void hci_rx_work(struct work_struct *work)
5238 {
5239         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5240         struct sk_buff *skb;
5241
5242         BT_DBG("%s", hdev->name);
5243
5244         while ((skb = skb_dequeue(&hdev->rx_q))) {
5245                 /* Send copy to monitor */
5246                 hci_send_to_monitor(hdev, skb);
5247
5248                 if (atomic_read(&hdev->promisc)) {
5249                         /* Send copy to the sockets */
5250                         hci_send_to_sock(hdev, skb);
5251                 }
5252
5253                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5254                         kfree_skb(skb);
5255                         continue;
5256                 }
5257
5258                 if (test_bit(HCI_INIT, &hdev->flags)) {
5259                         /* Don't process data packets in this state. */
5260                         switch (bt_cb(skb)->pkt_type) {
5261                         case HCI_ACLDATA_PKT:
5262                         case HCI_SCODATA_PKT:
5263                                 kfree_skb(skb);
5264                                 continue;
5265                         }
5266                 }
5267
5268                 /* Process frame */
5269                 switch (bt_cb(skb)->pkt_type) {
5270                 case HCI_EVENT_PKT:
5271                         BT_DBG("%s Event packet", hdev->name);
5272                         hci_event_packet(hdev, skb);
5273                         break;
5274
5275                 case HCI_ACLDATA_PKT:
5276                         BT_DBG("%s ACL data packet", hdev->name);
5277                         hci_acldata_packet(hdev, skb);
5278                         break;
5279
5280                 case HCI_SCODATA_PKT:
5281                         BT_DBG("%s SCO data packet", hdev->name);
5282                         hci_scodata_packet(hdev, skb);
5283                         break;
5284
5285                 default:
5286                         kfree_skb(skb);
5287                         break;
5288                 }
5289         }
5290 }
5291
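/* Command scheduler: send at most one command per invocation while the
 * controller advertises a free command slot (cmd_cnt). A clone is kept
 * in hdev->sent_cmd so the completion event can be matched and the
 * command resent if necessary; cmd_timer is armed to catch controllers
 * that never answer, except during a reset, when it is cancelled.
 */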
5292 static void hci_cmd_work(struct work_struct *work)
5293 {
5294         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5295         struct sk_buff *skb;
5296
5297         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5298                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5299
5300         /* Send queued commands */
5301         if (atomic_read(&hdev->cmd_cnt)) {
5302                 skb = skb_dequeue(&hdev->cmd_q);
5303                 if (!skb)
5304                         return;
5305
5306                 kfree_skb(hdev->sent_cmd);
5307
5308                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5309                 if (hdev->sent_cmd) {
5310                         atomic_dec(&hdev->cmd_cnt);
5311                         hci_send_frame(hdev, skb);
5312                         if (test_bit(HCI_RESET, &hdev->flags))
5313                                 cancel_delayed_work(&hdev->cmd_timer);
5314                         else
5315                                 schedule_delayed_work(&hdev->cmd_timer,
5316                                                       HCI_CMD_TIMEOUT);
5317                 } else {
5318                         skb_queue_head(&hdev->cmd_q, skb);
5319                         queue_work(hdev->workqueue, &hdev->cmd_work);
5320                 }
5321         }
5322 }
5323
5324 void hci_req_add_le_scan_disable(struct hci_request *req)
5325 {
5326         struct hci_cp_le_set_scan_enable cp;
5327
5328         memset(&cp, 0, sizeof(cp));
5329         cp.enable = LE_SCAN_DISABLE;
5330         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5331 }
5332
5333 void hci_req_add_le_passive_scan(struct hci_request *req)
5334 {
5335         struct hci_cp_le_set_scan_param param_cp;
5336         struct hci_cp_le_set_scan_enable enable_cp;
5337         struct hci_dev *hdev = req->hdev;
5338         u8 own_addr_type;
5339
5340         /* Set require_privacy to false since no SCAN_REQ is sent
5341          * during passive scanning. Not using an unresolvable address
5342          * here is important so that peer devices using direct
5343          * advertising with our address will be correctly reported
5344          * by the controller.
5345          */
5346         if (hci_update_random_address(req, false, &own_addr_type))
5347                 return;
5348
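        /* le_scan_interval and le_scan_window are already stored in the
         * controller's native units (0.625 ms per step, as defined for
         * LE Set Scan Parameters in the Core Specification), so they
         * are passed through unconverted.
         */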
5349         memset(&param_cp, 0, sizeof(param_cp));
5350         param_cp.type = LE_SCAN_PASSIVE;
5351         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5352         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5353         param_cp.own_address_type = own_addr_type;
5354         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5355                     &param_cp);
5356
5357         memset(&enable_cp, 0, sizeof(enable_cp));
5358         enable_cp.enable = LE_SCAN_ENABLE;
5359         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5360         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5361                     &enable_cp);
5362 }
5363
5364 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5365 {
5366         if (status)
5367                 BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
5368                        status);
5369 }
5370
5371 /* This function controls the background scanning based on hdev->pend_le_conns
5372  * list. If there are pending LE connections we start the background scanning,
5373  * otherwise we stop it.
5374  *
5375  * This function requires that the caller hold hdev->lock.
5376  */
5377 void hci_update_background_scan(struct hci_dev *hdev)
5378 {
5379         struct hci_request req;
5380         struct hci_conn *conn;
5381         int err;
5382
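        /* Defer while the device is down, still initializing or being
         * configured, about to be automatically powered off, or in the
         * middle of unregistering; any scan state programmed now would
         * either be stale or race with teardown.
         */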
5383         if (!test_bit(HCI_UP, &hdev->flags) ||
5384             test_bit(HCI_INIT, &hdev->flags) ||
5385             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5386             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5387             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5388             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5389                 return;
5390
5391         /* No point in doing scanning if LE support hasn't been enabled */
5392         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5393                 return;
5394
5395         /* If discovery is active don't interfere with it */
5396         if (hdev->discovery.state != DISCOVERY_STOPPED)
5397                 return;
5398
5399         hci_req_init(&req, hdev);
5400
5401         if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5402             list_empty(&hdev->pend_le_conns) &&
5403             list_empty(&hdev->pend_le_reports)) {
5404                 /* If there are no pending LE connections or devices
5405                  * to be scanned for, we should stop the background
5406                  * scanning.
5407                  */
5408
5409                 /* If controller is not scanning we are done. */
5410                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5411                         return;
5412
5413                 hci_req_add_le_scan_disable(&req);
5414
5415                 BT_DBG("%s stopping background scanning", hdev->name);
5416         } else {
5417                 /* If there is at least one pending LE connection, we should
5418                  * keep the background scan running.
5419                  */
5420
5421                 /* If controller is connecting, we should not start scanning
5422                  * since some controllers are not able to scan and connect at
5423                  * the same time.
5424                  */
5425                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5426                 if (conn)
5427                         return;
5428
5429                 /* If controller is currently scanning, we stop it to ensure we
5430                  * don't miss any advertising (due to duplicates filter).
5431                  */
5432                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5433                         hci_req_add_le_scan_disable(&req);
5434
5435                 hci_req_add_le_passive_scan(&req);
5436
5437                 BT_DBG("%s starting background scanning", hdev->name);
5438         }
5439
5440         err = hci_req_run(&req, update_background_scan_complete);
5441         if (err)
5442                 BT_ERR("Failed to run HCI request: err %d", err);
5443 }