/* net/bluetooth/hci_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
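
/* req_status tracks the lifecycle of a synchronous request: it is set to
 * HCI_REQ_PEND while a request is in flight, moved to HCI_REQ_DONE by
 * hci_req_sync_complete() or to HCI_REQ_CANCELED by hci_req_cancel(),
 * and anyone sleeping on req_wait_q is then woken up.
 */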

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
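
/* A usage sketch (assuming the usual debugfs mount point): the entry
 * appears as /sys/kernel/debug/bluetooth/hci<n>/dut_mode, so
 * "echo Y > dut_mode" on an UP controller enables Device Under Test
 * mode and "echo N > dut_mode" resets the controller out of it.
 */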

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
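
/* The read-only entries below all follow the same pattern: a *_show()
 * callback rendered through seq_file via single_open(), so every read
 * regenerates the text from the current hdev state under the
 * appropriate lock.
 */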

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_lesc_support_fops = {
	.open		= simple_open,
	.read		= force_lesc_support_read,
	.write		= force_lesc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

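/* Sniff intervals are expressed in baseband slots of 0.625 ms, and the
 * controller requires an even number of slots, hence the "val % 2"
 * check below.
 */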
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

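/* LE connection intervals are in units of 1.25 ms; the valid range
 * 0x0006-0x0c80 therefore corresponds to 7.5 ms up to 4 seconds.
 */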
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

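/* Slave latency is a count of connection events the peripheral may skip;
 * 0x01f3 (499) is the maximum the specification allows.
 */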
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

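/* The supervision timeout is in units of 10 ms, so 0x000a-0x0c80 covers
 * 100 ms up to 32 seconds.
 */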
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

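/* The advertising channel map is a 3-bit mask: 0x01 selects channel 37,
 * 0x02 channel 38 and 0x04 channel 39, so valid values run from 0x01
 * (a single channel) to 0x07 (all three).
 */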
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

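/* Advertising intervals are in units of 0.625 ms; 0x0020-0x4000 maps to
 * 20 ms up to 10.24 seconds.
 */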
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

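/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches the expected event code, or, for Command Complete,
 * the expected opcode. On any mismatch the skb is freed and
 * ERR_PTR(-ENODATA) is returned.
 */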
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

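/* Send a single HCI command and sleep until the controller answers with
 * the given event (or with Command Complete when event is 0). Returns
 * the event skb on success and an ERR_PTR otherwise; callers are
 * expected to serialize against other requests via hci_req_lock().
 */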
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
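
/* A minimal usage sketch (the 0xfc01 vendor opcode and its parameter are
 * made up for illustration): issue a command and wait for its Command
 * Complete while holding the request lock, as dut_mode_write() does
 * above:
 *
 *	u8 param = 0x01;
 *	struct sk_buff *skb;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */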

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

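/* Controller initialization is staged: hci_init1_req through
 * hci_init4_req are run back to back as synchronous requests, each
 * stage only issuing commands that the results of the previous stage
 * showed to be supported.
 */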
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout: 0x7d00 slots * 0.625 ms = ~20 seconds */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

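/* Pick the best inquiry mode the controller can do: 0x02 selects inquiry
 * with extended results, 0x01 inquiry with RSSI reports and 0x00 the
 * standard mode. The manufacturer/revision checks below whitelist a few
 * controllers that handle RSSI inquiry despite not advertising it.
 */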
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

1695         /* Some Broadcom based Bluetooth controllers do not support the
1696          * Delete Stored Link Key command. They are clearly indicating its
1697          * absence in the bit mask of supported commands.
1698          *
1699          * Check the supported commands and send the command only if it
1700          * is marked as supported. If not supported, assume that the
1701          * controller does not have actual support for stored link keys,
1702          * which makes this command redundant anyway.
1703          *
1704          * Some controllers indicate that they support handling deleting
1705          * stored link keys, but they don't. The quirk lets a driver
1706          * just disable this command.
1707          */
1708         if (hdev->commands[6] & 0x80 &&
1709             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1710                 struct hci_cp_delete_stored_link_key cp;
1711
1712                 bacpy(&cp.bdaddr, BDADDR_ANY);
1713                 cp.delete_all = 0x01;
1714                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1715                             sizeof(cp), &cp);
1716         }
1717
1718         if (hdev->commands[5] & 0x10)
1719                 hci_setup_link_policy(req);
1720
1721         if (lmp_le_capable(hdev)) {
1722                 u8 events[8];
1723
1724                 memset(events, 0, sizeof(events));
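                /* The default mask (0x0f) enables the first four LE meta
                 * events: Connection Complete, Advertising Report,
                 * Connection Update Complete and Read Remote Used
                 * Features Complete.
                 */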
1725                 events[0] = 0x0f;
1726
1727                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1728                         events[0] |= 0x10;      /* LE Long Term Key Request */
1729
1730                 /* If controller supports the Connection Parameters Request
1731                  * Link Layer Procedure, enable the corresponding event.
1732                  */
1733                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1734                         events[0] |= 0x20;      /* LE Remote Connection
1735                                                  * Parameter Request
1736                                                  */
1737
1738                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1739                             events);
1740
1741                 if (hdev->commands[25] & 0x40) {
1742                         /* Read LE Advertising Channel TX Power */
1743                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1744                 }
1745
1746                 hci_set_le_support(req);
1747         }
1748
1749         /* Read features beyond page 1 if available */
1750         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1751                 struct hci_cp_read_local_ext_features cp;
1752
1753                 cp.page = p;
1754                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1755                             sizeof(cp), &cp);
1756         }
1757 }
1758
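/* Fourth and final initialization stage: optional commands, each gated
 * on the controller's supported-commands bitmap or feature bits.
 */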
1759 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1760 {
1761         struct hci_dev *hdev = req->hdev;
1762
1763         /* Set event mask page 2 if the HCI command for it is supported */
1764         if (hdev->commands[22] & 0x04)
1765                 hci_set_event_mask_page_2(req);
1766
1767         /* Read local codec list if the HCI command is supported */
1768         if (hdev->commands[29] & 0x20)
1769                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1770
1771         /* Get MWS transport configuration if the HCI command is supported */
1772         if (hdev->commands[30] & 0x08)
1773                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1774
1775         /* Check for Synchronization Train support */
1776         if (lmp_sync_train_capable(hdev))
1777                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1778
1779         /* Enable Secure Connections if supported and configured */
1780         if (bredr_sc_enabled(hdev)) {
1781                 u8 support = 0x01;
1782                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1783                             sizeof(support), &support);
1784         }
1785 }
1786
1787 static int __hci_init(struct hci_dev *hdev)
1788 {
1789         int err;
1790
1791         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1792         if (err < 0)
1793                 return err;
1794
1795         /* The Device Under Test (DUT) mode is special and available for
1796          * all controller types. So just create it early on.
1797          */
1798         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1799                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1800                                     &dut_mode_fops);
1801         }
1802
1803         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1804          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1805          * first stage init.
1806          */
1807         if (hdev->dev_type != HCI_BREDR)
1808                 return 0;
1809
1810         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1811         if (err < 0)
1812                 return err;
1813
1814         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1815         if (err < 0)
1816                 return err;
1817
1818         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1819         if (err < 0)
1820                 return err;
1821
1822         /* Only create debugfs entries during the initial setup
1823          * phase and not every time the controller gets powered on.
1824          */
1825         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1826                 return 0;
1827
1828         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1829                             &features_fops);
1830         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1831                            &hdev->manufacturer);
1832         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1833         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1834         debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1835                             &device_list_fops);
1836         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1837                             &blacklist_fops);
1838         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1839
1840         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1841                             &conn_info_min_age_fops);
1842         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1843                             &conn_info_max_age_fops);
1844
1845         if (lmp_bredr_capable(hdev)) {
1846                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1847                                     hdev, &inquiry_cache_fops);
1848                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1849                                     hdev, &link_keys_fops);
1850                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1851                                     hdev, &dev_class_fops);
1852                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1853                                     hdev, &voice_setting_fops);
1854         }
1855
1856         if (lmp_ssp_capable(hdev)) {
1857                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1858                                     hdev, &auto_accept_delay_fops);
1859                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1860                                     hdev, &force_sc_support_fops);
1861                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1862                                     hdev, &sc_only_mode_fops);
1863                 if (lmp_le_capable(hdev))
1864                         debugfs_create_file("force_lesc_support", 0644,
1865                                             hdev->debugfs, hdev,
1866                                             &force_lesc_support_fops);
1867         }
1868
1869         if (lmp_sniff_capable(hdev)) {
1870                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1871                                     hdev, &idle_timeout_fops);
1872                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1873                                     hdev, &sniff_min_interval_fops);
1874                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1875                                     hdev, &sniff_max_interval_fops);
1876         }
1877
1878         if (lmp_le_capable(hdev)) {
1879                 debugfs_create_file("identity", 0400, hdev->debugfs,
1880                                     hdev, &identity_fops);
1881                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1882                                     hdev, &rpa_timeout_fops);
1883                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1884                                     hdev, &random_address_fops);
1885                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1886                                     hdev, &static_address_fops);
1887
1888                 /* For controllers with a public address, provide a debug
1889                  * option to force the usage of the configured static
1890                  * address. By default the public address is used.
1891                  */
1892                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1893                         debugfs_create_file("force_static_address", 0644,
1894                                             hdev->debugfs, hdev,
1895                                             &force_static_address_fops);
1896
1897                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1898                                   &hdev->le_white_list_size);
1899                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1900                                     &white_list_fops);
1901                 debugfs_create_file("identity_resolving_keys", 0400,
1902                                     hdev->debugfs, hdev,
1903                                     &identity_resolving_keys_fops);
1904                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1905                                     hdev, &long_term_keys_fops);
1906                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1907                                     hdev, &conn_min_interval_fops);
1908                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1909                                     hdev, &conn_max_interval_fops);
1910                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1911                                     hdev, &conn_latency_fops);
1912                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1913                                     hdev, &supervision_timeout_fops);
1914                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1915                                     hdev, &adv_channel_map_fops);
1916                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1917                                     hdev, &adv_min_interval_fops);
1918                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1919                                     hdev, &adv_max_interval_fops);
1920                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1921                                    hdev->debugfs,
1922                                    &hdev->discov_interleaved_timeout);
1923
1924                 smp_register(hdev);
1925         }
1926
1927         return 0;
1928 }
1929
1930 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1931 {
1932         struct hci_dev *hdev = req->hdev;
1933
1934         BT_DBG("%s %ld", hdev->name, opt);
1935
1936         /* Reset */
1937         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1938                 hci_reset_req(req, 0);
1939
1940         /* Read Local Version */
1941         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1942
1943         /* Read BD Address */
1944         if (hdev->set_bdaddr)
1945                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1946 }
1947
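/* Minimal initialization for controllers that start out unconfigured:
 * run hci_init0_req so that at least the local version information
 * (and, when a set_bdaddr callback exists, the BD address) is read.
 */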
1948 static int __hci_unconf_init(struct hci_dev *hdev)
1949 {
1950         int err;
1951
1952         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1953                 return 0;
1954
1955         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1956         if (err < 0)
1957                 return err;
1958
1959         return 0;
1960 }
1961
1962 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1963 {
1964         __u8 scan = opt;
1965
1966         BT_DBG("%s %x", req->hdev->name, scan);
1967
1968         /* Inquiry and Page scans */
1969         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1970 }
1971
1972 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1973 {
1974         __u8 auth = opt;
1975
1976         BT_DBG("%s %x", req->hdev->name, auth);
1977
1978         /* Authentication */
1979         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1980 }
1981
1982 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1983 {
1984         __u8 encrypt = opt;
1985
1986         BT_DBG("%s %x", req->hdev->name, encrypt);
1987
1988         /* Encryption */
1989         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1990 }
1991
1992 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1993 {
1994         __le16 policy = cpu_to_le16(opt);
1995
1996         BT_DBG("%s %x", req->hdev->name, policy);
1997
1998         /* Default link policy */
1999         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2000 }
2001
2002 /* Get HCI device by index.
2003  * Device is held on return; callers must drop it with hci_dev_put(). */
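/* A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use the device ...
 *		hci_dev_put(hdev);
 *	}
 */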
2004 struct hci_dev *hci_dev_get(int index)
2005 {
2006         struct hci_dev *hdev = NULL, *d;
2007
2008         BT_DBG("%d", index);
2009
2010         if (index < 0)
2011                 return NULL;
2012
2013         read_lock(&hci_dev_list_lock);
2014         list_for_each_entry(d, &hci_dev_list, list) {
2015                 if (d->id == index) {
2016                         hdev = hci_dev_hold(d);
2017                         break;
2018                 }
2019         }
2020         read_unlock(&hci_dev_list_lock);
2021         return hdev;
2022 }
2023
2024 /* ---- Inquiry support ---- */
2025
2026 bool hci_discovery_active(struct hci_dev *hdev)
2027 {
2028         struct discovery_state *discov = &hdev->discovery;
2029
2030         switch (discov->state) {
2031         case DISCOVERY_FINDING:
2032         case DISCOVERY_RESOLVING:
2033                 return true;
2034
2035         default:
2036                 return false;
2037         }
2038 }
2039
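/* Drive the discovery state machine. mgmt is notified when discovery
 * effectively starts (FINDING) or stops (STOPPED).
 */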
2040 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2041 {
2042         int old_state = hdev->discovery.state;
2043
2044         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2045
2046         if (old_state == state)
2047                 return;
2048
2049         hdev->discovery.state = state;
2050
2051         switch (state) {
2052         case DISCOVERY_STOPPED:
2053                 hci_update_background_scan(hdev);
2054
2055                 /* Reset RSSI and UUID filters to ensure Start Discovery
2056                  * and Start Service Discovery operate properly no matter
2057                  * which one started the previous discovery.
2058                  *
2059                  * While the Start Discovery and Start Service Discovery
2060                  * operations will set proper values for RSSI and UUID
2061                  * count, it is important to actually free the allocated
2062                  * list of UUIDs here.
2063                  */
2064                 hci_discovery_filter_clear(hdev);
2065
2066                 if (old_state != DISCOVERY_STARTING)
2067                         mgmt_discovering(hdev, 0);
2068                 break;
2069         case DISCOVERY_STARTING:
2070                 break;
2071         case DISCOVERY_FINDING:
2072                 mgmt_discovering(hdev, 1);
2073                 break;
2074         case DISCOVERY_RESOLVING:
2075                 break;
2076         case DISCOVERY_STOPPING:
2077                 break;
2078         }
2079 }
2080
2081 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2082 {
2083         struct discovery_state *cache = &hdev->discovery;
2084         struct inquiry_entry *p, *n;
2085
2086         list_for_each_entry_safe(p, n, &cache->all, all) {
2087                 list_del(&p->all);
2088                 kfree(p);
2089         }
2090
2091         INIT_LIST_HEAD(&cache->unknown);
2092         INIT_LIST_HEAD(&cache->resolve);
2093 }
2094
2095 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2096                                                bdaddr_t *bdaddr)
2097 {
2098         struct discovery_state *cache = &hdev->discovery;
2099         struct inquiry_entry *e;
2100
2101         BT_DBG("cache %p, %pMR", cache, bdaddr);
2102
2103         list_for_each_entry(e, &cache->all, all) {
2104                 if (!bacmp(&e->data.bdaddr, bdaddr))
2105                         return e;
2106         }
2107
2108         return NULL;
2109 }
2110
2111 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2112                                                        bdaddr_t *bdaddr)
2113 {
2114         struct discovery_state *cache = &hdev->discovery;
2115         struct inquiry_entry *e;
2116
2117         BT_DBG("cache %p, %pMR", cache, bdaddr);
2118
2119         list_for_each_entry(e, &cache->unknown, list) {
2120                 if (!bacmp(&e->data.bdaddr, bdaddr))
2121                         return e;
2122         }
2123
2124         return NULL;
2125 }
2126
2127 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2128                                                        bdaddr_t *bdaddr,
2129                                                        int state)
2130 {
2131         struct discovery_state *cache = &hdev->discovery;
2132         struct inquiry_entry *e;
2133
2134         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2135
2136         list_for_each_entry(e, &cache->resolve, list) {
2137                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2138                         return e;
2139                 if (!bacmp(&e->data.bdaddr, bdaddr))
2140                         return e;
2141         }
2142
2143         return NULL;
2144 }
2145
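/* Re-insert @ie so that the resolve list stays ordered by signal
 * strength (smallest |RSSI| first), skipping past entries whose name
 * resolution is already pending.
 */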
2146 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2147                                       struct inquiry_entry *ie)
2148 {
2149         struct discovery_state *cache = &hdev->discovery;
2150         struct list_head *pos = &cache->resolve;
2151         struct inquiry_entry *p;
2152
2153         list_del(&ie->list);
2154
2155         list_for_each_entry(p, &cache->resolve, list) {
2156                 if (p->name_state != NAME_PENDING &&
2157                     abs(p->data.rssi) >= abs(ie->data.rssi))
2158                         break;
2159                 pos = &p->list;
2160         }
2161
2162         list_add(&ie->list, pos);
2163 }
2164
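/* Add a fresh inquiry result to the cache, or refresh an existing
 * entry, and return the MGMT device-found flags (legacy pairing,
 * confirm name) that apply to it.
 */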
2165 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2166                              bool name_known)
2167 {
2168         struct discovery_state *cache = &hdev->discovery;
2169         struct inquiry_entry *ie;
2170         u32 flags = 0;
2171
2172         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2173
2174         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2175
2176         if (!data->ssp_mode)
2177                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2178
2179         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2180         if (ie) {
2181                 if (!ie->data.ssp_mode)
2182                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2183
2184                 if (ie->name_state == NAME_NEEDED &&
2185                     data->rssi != ie->data.rssi) {
2186                         ie->data.rssi = data->rssi;
2187                         hci_inquiry_cache_update_resolve(hdev, ie);
2188                 }
2189
2190                 goto update;
2191         }
2192
2193         /* Entry not in the cache. Add new one. */
2194         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2195         if (!ie) {
2196                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2197                 goto done;
2198         }
2199
2200         list_add(&ie->all, &cache->all);
2201
2202         if (name_known) {
2203                 ie->name_state = NAME_KNOWN;
2204         } else {
2205                 ie->name_state = NAME_NOT_KNOWN;
2206                 list_add(&ie->list, &cache->unknown);
2207         }
2208
2209 update:
2210         if (name_known && ie->name_state != NAME_KNOWN &&
2211             ie->name_state != NAME_PENDING) {
2212                 ie->name_state = NAME_KNOWN;
2213                 list_del(&ie->list);
2214         }
2215
2216         memcpy(&ie->data, data, sizeof(*data));
2217         ie->timestamp = jiffies;
2218         cache->timestamp = jiffies;
2219
2220         if (ie->name_state == NAME_NOT_KNOWN)
2221                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2222
2223 done:
2224         return flags;
2225 }
2226
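/* Copy up to @num cached results into @buf as struct inquiry_info
 * records and return how many were copied. Called under hdev->lock,
 * so it must not sleep.
 */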
2227 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2228 {
2229         struct discovery_state *cache = &hdev->discovery;
2230         struct inquiry_info *info = (struct inquiry_info *) buf;
2231         struct inquiry_entry *e;
2232         int copied = 0;
2233
2234         list_for_each_entry(e, &cache->all, all) {
2235                 struct inquiry_data *data = &e->data;
2236
2237                 if (copied >= num)
2238                         break;
2239
2240                 bacpy(&info->bdaddr, &data->bdaddr);
2241                 info->pscan_rep_mode    = data->pscan_rep_mode;
2242                 info->pscan_period_mode = data->pscan_period_mode;
2243                 info->pscan_mode        = data->pscan_mode;
2244                 memcpy(info->dev_class, data->dev_class, 3);
2245                 info->clock_offset      = data->clock_offset;
2246
2247                 info++;
2248                 copied++;
2249         }
2250
2251         BT_DBG("cache %p, copied %d", cache, copied);
2252         return copied;
2253 }
2254
2255 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2256 {
2257         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2258         struct hci_dev *hdev = req->hdev;
2259         struct hci_cp_inquiry cp;
2260
2261         BT_DBG("%s", hdev->name);
2262
2263         if (test_bit(HCI_INQUIRY, &hdev->flags))
2264                 return;
2265
2266         /* Start Inquiry */
2267         memcpy(&cp.lap, &ir->lap, 3);
2268         cp.length  = ir->length;
2269         cp.num_rsp = ir->num_rsp;
2270         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2271 }
2272
2273 int hci_inquiry(void __user *arg)
2274 {
2275         __u8 __user *ptr = arg;
2276         struct hci_inquiry_req ir;
2277         struct hci_dev *hdev;
2278         int err = 0, do_inquiry = 0, max_rsp;
2279         long timeo;
2280         __u8 *buf;
2281
2282         if (copy_from_user(&ir, ptr, sizeof(ir)))
2283                 return -EFAULT;
2284
2285         hdev = hci_dev_get(ir.dev_id);
2286         if (!hdev)
2287                 return -ENODEV;
2288
2289         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2290                 err = -EBUSY;
2291                 goto done;
2292         }
2293
2294         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2295                 err = -EOPNOTSUPP;
2296                 goto done;
2297         }
2298
2299         if (hdev->dev_type != HCI_BREDR) {
2300                 err = -EOPNOTSUPP;
2301                 goto done;
2302         }
2303
2304         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2305                 err = -EOPNOTSUPP;
2306                 goto done;
2307         }
2308
2309         hci_dev_lock(hdev);
2310         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2311             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2312                 hci_inquiry_cache_flush(hdev);
2313                 do_inquiry = 1;
2314         }
2315         hci_dev_unlock(hdev);
2316
2317         timeo = ir.length * msecs_to_jiffies(2000);
2318
2319         if (do_inquiry) {
2320                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2321                                    timeo);
2322                 if (err < 0)
2323                         goto done;
2324
2325                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2326                  * cleared). If it is interrupted by a signal, return -EINTR.
2327                  */
2328                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2329                                 TASK_INTERRUPTIBLE)) {
2330                         err = -EINTR;
2331                         goto done;
2332                 }
2331         }
2332
2333         /* For an unlimited number of responses we use a buffer with
2334          * 255 entries.
2335          */
2336         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2337
2338         /* cache_dump can't sleep. Therefore we allocate a temporary
2339          * buffer and then copy it to user space.
2340          */
2341         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2342         if (!buf) {
2343                 err = -ENOMEM;
2344                 goto done;
2345         }
2346
2347         hci_dev_lock(hdev);
2348         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2349         hci_dev_unlock(hdev);
2350
2351         BT_DBG("num_rsp %d", ir.num_rsp);
2352
2353         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2354                 ptr += sizeof(ir);
2355                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2356                                  ir.num_rsp))
2357                         err = -EFAULT;
2358         } else
2359                 err = -EFAULT;
2360
2361         kfree(buf);
2362
2363 done:
2364         hci_dev_put(hdev);
2365         return err;
2366 }
2367
2368 static int hci_dev_do_open(struct hci_dev *hdev)
2369 {
2370         int ret = 0;
2371
2372         BT_DBG("%s %p", hdev->name, hdev);
2373
2374         hci_req_lock(hdev);
2375
2376         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2377                 ret = -ENODEV;
2378                 goto done;
2379         }
2380
2381         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2382             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2383                 /* Check for rfkill but allow the HCI setup stage to
2384                  * proceed (which in itself doesn't cause any RF activity).
2385                  */
2386                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2387                         ret = -ERFKILL;
2388                         goto done;
2389                 }
2390
2391                 /* Check for valid public address or a configured static
2392          * random address, but let the HCI setup proceed to
2393                  * be able to determine if there is a public address
2394                  * or not.
2395                  *
2396                  * In case of user channel usage, it is not important
2397                  * if a public address or static random address is
2398                  * available.
2399                  *
2400                  * This check is only valid for BR/EDR controllers
2401                  * since AMP controllers do not have an address.
2402                  */
2403                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2404                     hdev->dev_type == HCI_BREDR &&
2405                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2406                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2407                         ret = -EADDRNOTAVAIL;
2408                         goto done;
2409                 }
2410         }
2411
2412         if (test_bit(HCI_UP, &hdev->flags)) {
2413                 ret = -EALREADY;
2414                 goto done;
2415         }
2416
2417         if (hdev->open(hdev)) {
2418                 ret = -EIO;
2419                 goto done;
2420         }
2421
2422         atomic_set(&hdev->cmd_cnt, 1);
2423         set_bit(HCI_INIT, &hdev->flags);
2424
2425         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2426                 if (hdev->setup)
2427                         ret = hdev->setup(hdev);
2428
2429                 /* The transport driver can set these quirks before
2430                  * creating the HCI device or in its setup callback.
2431                  *
2432                  * In case any of them is set, the controller has to
2433                  * start up as unconfigured.
2434                  */
2435                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2436                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2437                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2438
2439                 /* For an unconfigured controller it is required to
2440                  * read at least the version information provided by
2441                  * the Read Local Version Information command.
2442                  *
2443                  * If the set_bdaddr driver callback is provided, then
2444                  * also the original Bluetooth public device address
2445                  * will be read using the Read BD Address command.
2446                  */
2447                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2448                         ret = __hci_unconf_init(hdev);
2449         }
2450
2451         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2452                 /* If public address change is configured, ensure that
2453                  * the address gets programmed. If the driver does not
2454                  * support changing the public address, fail the power
2455                  * on procedure.
2456                  */
2457                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2458                     hdev->set_bdaddr)
2459                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2460                 else
2461                         ret = -EADDRNOTAVAIL;
2462         }
2463
2464         if (!ret) {
2465                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2466                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2467                         ret = __hci_init(hdev);
2468         }
2469
2470         clear_bit(HCI_INIT, &hdev->flags);
2471
2472         if (!ret) {
2473                 hci_dev_hold(hdev);
2474                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2475                 set_bit(HCI_UP, &hdev->flags);
2476                 hci_notify(hdev, HCI_DEV_UP);
2477                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2478                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2479                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2480                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2481                     hdev->dev_type == HCI_BREDR) {
2482                         hci_dev_lock(hdev);
2483                         mgmt_powered(hdev, 1);
2484                         hci_dev_unlock(hdev);
2485                 }
2486         } else {
2487                 /* Init failed, cleanup */
2488                 flush_work(&hdev->tx_work);
2489                 flush_work(&hdev->cmd_work);
2490                 flush_work(&hdev->rx_work);
2491
2492                 skb_queue_purge(&hdev->cmd_q);
2493                 skb_queue_purge(&hdev->rx_q);
2494
2495                 if (hdev->flush)
2496                         hdev->flush(hdev);
2497
2498                 if (hdev->sent_cmd) {
2499                         kfree_skb(hdev->sent_cmd);
2500                         hdev->sent_cmd = NULL;
2501                 }
2502
2503                 hdev->close(hdev);
2504                 hdev->flags &= BIT(HCI_RAW);
2505         }
2506
2507 done:
2508         hci_req_unlock(hdev);
2509         return ret;
2510 }
2511
2512 /* ---- HCI ioctl helpers ---- */
2513
2514 int hci_dev_open(__u16 dev)
2515 {
2516         struct hci_dev *hdev;
2517         int err;
2518
2519         hdev = hci_dev_get(dev);
2520         if (!hdev)
2521                 return -ENODEV;
2522
2523         /* Devices that are marked as unconfigured can only be powered
2524          * up as user channel. Trying to bring them up as normal devices
2525          * will result in a failure. Only user channel operation is
2526          * possible.
2527          *
2528          * When this function is called for a user channel, the flag
2529          * HCI_USER_CHANNEL will be set first before attempting to
2530          * open the device.
2531          */
2532         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2533             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2534                 err = -EOPNOTSUPP;
2535                 goto done;
2536         }
2537
2538         /* We need to ensure that no other power on/off work is pending
2539          * before proceeding to call hci_dev_do_open. This is
2540          * particularly important if the setup procedure has not yet
2541          * completed.
2542          */
2543         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2544                 cancel_delayed_work(&hdev->power_off);
2545
2546         /* After this call it is guaranteed that the setup procedure
2547          * has finished. This means that error conditions like RFKILL
2548          * or no valid public or static random address apply.
2549          */
2550         flush_workqueue(hdev->req_workqueue);
2551
2552         /* For controllers not using the management interface and that
2553          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2554          * so that pairing works for them. Once the management interface
2555          * is in use this bit will be cleared again and userspace has
2556          * to explicitly enable it.
2557          */
2558         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2559             !test_bit(HCI_MGMT, &hdev->dev_flags))
2560                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2561
2562         err = hci_dev_do_open(hdev);
2563
2564 done:
2565         hci_dev_put(hdev);
2566         return err;
2567 }
2568
2569 /* This function requires the caller holds hdev->lock */
2570 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2571 {
2572         struct hci_conn_params *p;
2573
2574         list_for_each_entry(p, &hdev->le_conn_params, list) {
2575                 if (p->conn) {
2576                         hci_conn_drop(p->conn);
2577                         hci_conn_put(p->conn);
2578                         p->conn = NULL;
2579                 }
2580                 list_del_init(&p->action);
2581         }
2582
2583         BT_DBG("All LE pending actions cleared");
2584 }
2585
2586 static int hci_dev_do_close(struct hci_dev *hdev)
2587 {
2588         BT_DBG("%s %p", hdev->name, hdev);
2589
2590         cancel_delayed_work(&hdev->power_off);
2591
2592         hci_req_cancel(hdev, ENODEV);
2593         hci_req_lock(hdev);
2594
2595         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2596                 cancel_delayed_work_sync(&hdev->cmd_timer);
2597                 hci_req_unlock(hdev);
2598                 return 0;
2599         }
2600
2601         /* Flush RX and TX works */
2602         flush_work(&hdev->tx_work);
2603         flush_work(&hdev->rx_work);
2604
2605         if (hdev->discov_timeout > 0) {
2606                 cancel_delayed_work(&hdev->discov_off);
2607                 hdev->discov_timeout = 0;
2608                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2609                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2610         }
2611
2612         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2613                 cancel_delayed_work(&hdev->service_cache);
2614
2615         cancel_delayed_work_sync(&hdev->le_scan_disable);
2616
2617         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2618                 cancel_delayed_work_sync(&hdev->rpa_expired);
2619
2620         /* Avoid potential lockdep warnings from the *_flush() calls by
2621          * ensuring the workqueue is empty up front.
2622          */
2623         drain_workqueue(hdev->workqueue);
2624
2625         hci_dev_lock(hdev);
2626         hci_inquiry_cache_flush(hdev);
2627         hci_pend_le_actions_clear(hdev);
2628         hci_conn_hash_flush(hdev);
2629         hci_dev_unlock(hdev);
2630
2631         hci_notify(hdev, HCI_DEV_DOWN);
2632
2633         if (hdev->flush)
2634                 hdev->flush(hdev);
2635
2636         /* Reset device */
2637         skb_queue_purge(&hdev->cmd_q);
2638         atomic_set(&hdev->cmd_cnt, 1);
2639         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2640             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2641             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2642                 set_bit(HCI_INIT, &hdev->flags);
2643                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2644                 clear_bit(HCI_INIT, &hdev->flags);
2645         }
2646
2647         /* Flush cmd work */
2648         flush_work(&hdev->cmd_work);
2649
2650         /* Drop queues */
2651         skb_queue_purge(&hdev->rx_q);
2652         skb_queue_purge(&hdev->cmd_q);
2653         skb_queue_purge(&hdev->raw_q);
2654
2655         /* Drop last sent command */
2656         if (hdev->sent_cmd) {
2657                 cancel_delayed_work_sync(&hdev->cmd_timer);
2658                 kfree_skb(hdev->sent_cmd);
2659                 hdev->sent_cmd = NULL;
2660         }
2661
2662         kfree_skb(hdev->recv_evt);
2663         hdev->recv_evt = NULL;
2664
2665         /* After this point our queues are empty
2666          * and no tasks are scheduled. */
2667         hdev->close(hdev);
2668
2669         /* Clear flags */
2670         hdev->flags &= BIT(HCI_RAW);
2671         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2672
2673         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2674                 if (hdev->dev_type == HCI_BREDR) {
2675                         hci_dev_lock(hdev);
2676                         mgmt_powered(hdev, 0);
2677                         hci_dev_unlock(hdev);
2678                 }
2679         }
2680
2681         /* Controller radio is available but is currently powered down */
2682         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2683
2684         memset(hdev->eir, 0, sizeof(hdev->eir));
2685         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2686         bacpy(&hdev->random_addr, BDADDR_ANY);
2687
2688         hci_req_unlock(hdev);
2689
2690         hci_dev_put(hdev);
2691         return 0;
2692 }
2693
2694 int hci_dev_close(__u16 dev)
2695 {
2696         struct hci_dev *hdev;
2697         int err;
2698
2699         hdev = hci_dev_get(dev);
2700         if (!hdev)
2701                 return -ENODEV;
2702
2703         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2704                 err = -EBUSY;
2705                 goto done;
2706         }
2707
2708         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2709                 cancel_delayed_work(&hdev->power_off);
2710
2711         err = hci_dev_do_close(hdev);
2712
2713 done:
2714         hci_dev_put(hdev);
2715         return err;
2716 }
2717
2718 int hci_dev_reset(__u16 dev)
2719 {
2720         struct hci_dev *hdev;
2721         int ret = 0;
2722
2723         hdev = hci_dev_get(dev);
2724         if (!hdev)
2725                 return -ENODEV;
2726
2727         hci_req_lock(hdev);
2728
2729         if (!test_bit(HCI_UP, &hdev->flags)) {
2730                 ret = -ENETDOWN;
2731                 goto done;
2732         }
2733
2734         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2735                 ret = -EBUSY;
2736                 goto done;
2737         }
2738
2739         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2740                 ret = -EOPNOTSUPP;
2741                 goto done;
2742         }
2743
2744         /* Drop queues */
2745         skb_queue_purge(&hdev->rx_q);
2746         skb_queue_purge(&hdev->cmd_q);
2747
2748         /* Avoid potential lockdep warnings from the *_flush() calls by
2749          * ensuring the workqueue is empty up front.
2750          */
2751         drain_workqueue(hdev->workqueue);
2752
2753         hci_dev_lock(hdev);
2754         hci_inquiry_cache_flush(hdev);
2755         hci_conn_hash_flush(hdev);
2756         hci_dev_unlock(hdev);
2757
2758         if (hdev->flush)
2759                 hdev->flush(hdev);
2760
2761         atomic_set(&hdev->cmd_cnt, 1);
2762         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2763
2764         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2765
2766 done:
2767         hci_req_unlock(hdev);
2768         hci_dev_put(hdev);
2769         return ret;
2770 }
2771
2772 int hci_dev_reset_stat(__u16 dev)
2773 {
2774         struct hci_dev *hdev;
2775         int ret = 0;
2776
2777         hdev = hci_dev_get(dev);
2778         if (!hdev)
2779                 return -ENODEV;
2780
2781         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2782                 ret = -EBUSY;
2783                 goto done;
2784         }
2785
2786         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2787                 ret = -EOPNOTSUPP;
2788                 goto done;
2789         }
2790
2791         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2792
2793 done:
2794         hci_dev_put(hdev);
2795         return ret;
2796 }
2797
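/* Keep the mgmt connectable/discoverable settings in sync after a raw
 * HCISETSCAN ioctl changed the scan mode behind the management
 * interface's back.
 */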
2798 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2799 {
2800         bool conn_changed, discov_changed;
2801
2802         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2803
2804         if ((scan & SCAN_PAGE))
2805                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2806                                                  &hdev->dev_flags);
2807         else
2808                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2809                                                   &hdev->dev_flags);
2810
2811         if ((scan & SCAN_INQUIRY)) {
2812                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2813                                                    &hdev->dev_flags);
2814         } else {
2815                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2816                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2817                                                     &hdev->dev_flags);
2818         }
2819
2820         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2821                 return;
2822
2823         if (conn_changed || discov_changed) {
2824                 /* In case this was disabled through mgmt */
2825                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2826
2827                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2828                         mgmt_update_adv_data(hdev);
2829
2830                 mgmt_new_settings(hdev);
2831         }
2832 }
2833
2834 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2835 {
2836         struct hci_dev *hdev;
2837         struct hci_dev_req dr;
2838         int err = 0;
2839
2840         if (copy_from_user(&dr, arg, sizeof(dr)))
2841                 return -EFAULT;
2842
2843         hdev = hci_dev_get(dr.dev_id);
2844         if (!hdev)
2845                 return -ENODEV;
2846
2847         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2848                 err = -EBUSY;
2849                 goto done;
2850         }
2851
2852         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2853                 err = -EOPNOTSUPP;
2854                 goto done;
2855         }
2856
2857         if (hdev->dev_type != HCI_BREDR) {
2858                 err = -EOPNOTSUPP;
2859                 goto done;
2860         }
2861
2862         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2863                 err = -EOPNOTSUPP;
2864                 goto done;
2865         }
2866
2867         switch (cmd) {
2868         case HCISETAUTH:
2869                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2870                                    HCI_INIT_TIMEOUT);
2871                 break;
2872
2873         case HCISETENCRYPT:
2874                 if (!lmp_encrypt_capable(hdev)) {
2875                         err = -EOPNOTSUPP;
2876                         break;
2877                 }
2878
2879                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2880                         /* Auth must be enabled first */
2881                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2882                                            HCI_INIT_TIMEOUT);
2883                         if (err)
2884                                 break;
2885                 }
2886
2887                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2888                                    HCI_INIT_TIMEOUT);
2889                 break;
2890
2891         case HCISETSCAN:
2892                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2893                                    HCI_INIT_TIMEOUT);
2894
2895                 /* Ensure that the connectable and discoverable states
2896                  * get correctly modified as this was a non-mgmt change.
2897                  */
2898                 if (!err)
2899                         hci_update_scan_state(hdev, dr.dev_opt);
2900                 break;
2901
2902         case HCISETLINKPOL:
2903                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2904                                    HCI_INIT_TIMEOUT);
2905                 break;
2906
2907         case HCISETLINKMODE:
2908                 hdev->link_mode = ((__u16) dr.dev_opt) &
2909                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2910                 break;
2911
2912         case HCISETPTYPE:
2913                 hdev->pkt_type = (__u16) dr.dev_opt;
2914                 break;
2915
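        /* For the two MTU ioctls, dev_opt packs a pair of 16-bit values:
         * the first __u16 is the packet count and the second the MTU.
         * On a little-endian host that corresponds to
         * dev_opt == (mtu << 16) | pkt_count (an illustration of the
         * layout, not a separate ABI definition).
         */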
2916         case HCISETACLMTU:
2917                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2918                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2919                 break;
2920
2921         case HCISETSCOMTU:
2922                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2923                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2924                 break;
2925
2926         default:
2927                 err = -EINVAL;
2928                 break;
2929         }
2930
2931 done:
2932         hci_dev_put(hdev);
2933         return err;
2934 }
2935
2936 int hci_get_dev_list(void __user *arg)
2937 {
2938         struct hci_dev *hdev;
2939         struct hci_dev_list_req *dl;
2940         struct hci_dev_req *dr;
2941         int n = 0, size, err;
2942         __u16 dev_num;
2943
2944         if (get_user(dev_num, (__u16 __user *) arg))
2945                 return -EFAULT;
2946
2947         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2948                 return -EINVAL;
2949
2950         size = sizeof(*dl) + dev_num * sizeof(*dr);
2951
2952         dl = kzalloc(size, GFP_KERNEL);
2953         if (!dl)
2954                 return -ENOMEM;
2955
2956         dr = dl->dev_req;
2957
2958         read_lock(&hci_dev_list_lock);
2959         list_for_each_entry(hdev, &hci_dev_list, list) {
2960                 unsigned long flags = hdev->flags;
2961
2962                 /* When the auto-off is configured, it means the transport
2963                  * is running, but in that case still indicate that the
2964                  * device is actually down.
2965                  */
2966                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2967                         flags &= ~BIT(HCI_UP);
2968
2969                 (dr + n)->dev_id  = hdev->id;
2970                 (dr + n)->dev_opt = flags;
2971
2972                 if (++n >= dev_num)
2973                         break;
2974         }
2975         read_unlock(&hci_dev_list_lock);
2976
2977         dl->dev_num = n;
2978         size = sizeof(*dl) + n * sizeof(*dr);
2979
2980         err = copy_to_user(arg, dl, size);
2981         kfree(dl);
2982
2983         return err ? -EFAULT : 0;
2984 }
2985
2986 int hci_get_dev_info(void __user *arg)
2987 {
2988         struct hci_dev *hdev;
2989         struct hci_dev_info di;
2990         unsigned long flags;
2991         int err = 0;
2992
2993         if (copy_from_user(&di, arg, sizeof(di)))
2994                 return -EFAULT;
2995
2996         hdev = hci_dev_get(di.dev_id);
2997         if (!hdev)
2998                 return -ENODEV;
2999
3000         /* When the auto-off is configured, it means the transport
3001          * is running, but in that case still indicate that the
3002          * device is actually down.
3003          */
3004         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3005                 flags = hdev->flags & ~BIT(HCI_UP);
3006         else
3007                 flags = hdev->flags;
3008
3009         strcpy(di.name, hdev->name);
3010         di.bdaddr   = hdev->bdaddr;
3011         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3012         di.flags    = flags;
3013         di.pkt_type = hdev->pkt_type;
3014         if (lmp_bredr_capable(hdev)) {
3015                 di.acl_mtu  = hdev->acl_mtu;
3016                 di.acl_pkts = hdev->acl_pkts;
3017                 di.sco_mtu  = hdev->sco_mtu;
3018                 di.sco_pkts = hdev->sco_pkts;
3019         } else {
3020                 di.acl_mtu  = hdev->le_mtu;
3021                 di.acl_pkts = hdev->le_pkts;
3022                 di.sco_mtu  = 0;
3023                 di.sco_pkts = 0;
3024         }
3025         di.link_policy = hdev->link_policy;
3026         di.link_mode   = hdev->link_mode;
3027
3028         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3029         memcpy(&di.features, &hdev->features, sizeof(di.features));
3030
3031         if (copy_to_user(arg, &di, sizeof(di)))
3032                 err = -EFAULT;
3033
3034         hci_dev_put(hdev);
3035
3036         return err;
3037 }
3038
3039 /* ---- Interface to HCI drivers ---- */
3040
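/* rfkill callback: record the radio block state and, outside of the
 * setup and config phases, power the controller down when the radio
 * gets blocked.
 */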
3041 static int hci_rfkill_set_block(void *data, bool blocked)
3042 {
3043         struct hci_dev *hdev = data;
3044
3045         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3046
3047         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3048                 return -EBUSY;
3049
3050         if (blocked) {
3051                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3052                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3053                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3054                         hci_dev_do_close(hdev);
3055         } else {
3056                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3057         }
3058
3059         return 0;
3060 }
3061
3062 static const struct rfkill_ops hci_rfkill_ops = {
3063         .set_block = hci_rfkill_set_block,
3064 };
3065
3066 static void hci_power_on(struct work_struct *work)
3067 {
3068         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3069         int err;
3070
3071         BT_DBG("%s", hdev->name);
3072
3073         err = hci_dev_do_open(hdev);
3074         if (err < 0) {
3075                 mgmt_set_powered_failed(hdev, err);
3076                 return;
3077         }
3078
3079         /* During the HCI setup phase, a few error conditions are
3080          * ignored and they need to be checked now. If they are still
3081          * valid, it is important to turn the device back off.
3082          */
3083         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3084             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3085             (hdev->dev_type == HCI_BREDR &&
3086              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3087              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3088                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3089                 hci_dev_do_close(hdev);
3090         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3091                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3092                                    HCI_AUTO_OFF_TIMEOUT);
3093         }
3094
3095         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3096                 /* For unconfigured devices, set the HCI_RAW flag
3097                  * so that userspace can easily identify them.
3098                  */
3099                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3100                         set_bit(HCI_RAW, &hdev->flags);
3101
3102                 /* For fully configured devices, this will send
3103                  * the Index Added event. For unconfigured devices,
3104                  * it will send the Unconfigured Index Added event.
3105                  *
3106                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3107                  * and no event will be sent.
3108                  */
3109                 mgmt_index_added(hdev);
3110         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3111                 /* Now that the controller is configured, it
3112                  * is important to clear the HCI_RAW flag.
3113                  */
3114                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3115                         clear_bit(HCI_RAW, &hdev->flags);
3116
3117                 /* Powering on the controller with HCI_CONFIG set only
3118                  * happens with the transition from unconfigured to
3119                  * configured. This will send the Index Added event.
3120                  */
3121                 mgmt_index_added(hdev);
3122         }
3123 }
3124
3125 static void hci_power_off(struct work_struct *work)
3126 {
3127         struct hci_dev *hdev = container_of(work, struct hci_dev,
3128                                             power_off.work);
3129
3130         BT_DBG("%s", hdev->name);
3131
3132         hci_dev_do_close(hdev);
3133 }
3134
3135 static void hci_discov_off(struct work_struct *work)
3136 {
3137         struct hci_dev *hdev;
3138
3139         hdev = container_of(work, struct hci_dev, discov_off.work);
3140
3141         BT_DBG("%s", hdev->name);
3142
3143         mgmt_discoverable_timeout(hdev);
3144 }
3145
3146 void hci_uuids_clear(struct hci_dev *hdev)
3147 {
3148         struct bt_uuid *uuid, *tmp;
3149
3150         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3151                 list_del(&uuid->list);
3152                 kfree(uuid);
3153         }
3154 }
3155
3156 void hci_link_keys_clear(struct hci_dev *hdev)
3157 {
3158         struct link_key *key;
3159
3160         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3161                 list_del_rcu(&key->list);
3162                 kfree_rcu(key, rcu);
3163         }
3164 }
3165
3166 void hci_smp_ltks_clear(struct hci_dev *hdev)
3167 {
3168         struct smp_ltk *k;
3169
3170         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3171                 list_del_rcu(&k->list);
3172                 kfree_rcu(k, rcu);
3173         }
3174 }
3175
3176 void hci_smp_irks_clear(struct hci_dev *hdev)
3177 {
3178         struct smp_irk *k;
3179
3180         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3181                 list_del_rcu(&k->list);
3182                 kfree_rcu(k, rcu);
3183         }
3184 }
3185
3186 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3187 {
3188         struct link_key *k;
3189
3190         rcu_read_lock();
3191         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3192                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3193                         rcu_read_unlock();
3194                         return k;
3195                 }
3196         }
3197         rcu_read_unlock();
3198
3199         return NULL;
3200 }
3201
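/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and the bonding requirements of
 * both sides of the connection.
 */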
3202 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3203                                u8 key_type, u8 old_key_type)
3204 {
3205         /* Legacy key */
3206         if (key_type < 0x03)
3207                 return true;
3208
3209         /* Debug keys are insecure so don't store them persistently */
3210         if (key_type == HCI_LK_DEBUG_COMBINATION)
3211                 return false;
3212
3213         /* Changed combination key and there's no previous one */
3214         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3215                 return false;
3216
3217         /* Security mode 3 case */
3218         if (!conn)
3219                 return true;
3220
3221         /* BR/EDR key derived using SC from an LE link */
3222         if (conn->type == LE_LINK)
3223                 return true;
3224
3225         /* Neither local nor remote side had no-bonding as a requirement */
3226         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3227                 return true;
3228
3229         /* Local side had dedicated bonding as requirement */
3230         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3231                 return true;
3232
3233         /* Remote side had dedicated bonding as requirement */
3234         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3235                 return true;
3236
3237         /* If none of the above criteria match, then don't store the key
3238          * persistently */
3239         return false;
3240 }
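
/* Illustrative only: hci_add_link_key() below reports the result of
 * hci_persistent_key() through its "persistent" out-parameter:
 *
 *     bool persistent;
 *
 *     key = hci_add_link_key(hdev, conn, bdaddr, val, type, pin_len,
 *                            &persistent);
 *     if (key && !persistent)
 *             ... keep the key only for this session, do not store it ...
 */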
3241
3242 static u8 ltk_role(u8 type)
3243 {
3244         if (type == SMP_LTK)
3245                 return HCI_ROLE_MASTER;
3246
3247         return HCI_ROLE_SLAVE;
3248 }
3249
3250 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3251                              u8 addr_type, u8 role)
3252 {
3253         struct smp_ltk *k;
3254
3255         rcu_read_lock();
3256         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3257                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3258                         continue;
3259
3260                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
3261                         rcu_read_unlock();
3262                         return k;
3263                 }
3264         }
3265         rcu_read_unlock();
3266
3267         return NULL;
3268 }
3269
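/* Resolve an RPA to its IRK in two passes: first a cheap comparison
 * against the RPA cached in each entry, then the full cryptographic
 * check via smp_irk_matches(). On a match the RPA is cached so the
 * next lookup takes the fast path.
 */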
3270 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3271 {
3272         struct smp_irk *irk;
3273
3274         rcu_read_lock();
3275         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3276                 if (!bacmp(&irk->rpa, rpa)) {
3277                         rcu_read_unlock();
3278                         return irk;
3279                 }
3280         }
3281
3282         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3283                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3284                         bacpy(&irk->rpa, rpa);
3285                         rcu_read_unlock();
3286                         return irk;
3287                 }
3288         }
3289         rcu_read_unlock();
3290
3291         return NULL;
3292 }
3293
3294 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3295                                      u8 addr_type)
3296 {
3297         struct smp_irk *irk;
3298
3299         /* Identity Address must be public or static random */
3300         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3301                 return NULL;
3302
3303         rcu_read_lock();
3304         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3305                 if (addr_type == irk->addr_type &&
3306                     bacmp(bdaddr, &irk->bdaddr) == 0) {
3307                         rcu_read_unlock();
3308                         return irk;
3309                 }
3310         }
3311         rcu_read_unlock();
3312
3313         return NULL;
3314 }
3315
3316 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3317                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3318                                   u8 pin_len, bool *persistent)
3319 {
3320         struct link_key *key, *old_key;
3321         u8 old_key_type;
3322
3323         old_key = hci_find_link_key(hdev, bdaddr);
3324         if (old_key) {
3325                 old_key_type = old_key->type;
3326                 key = old_key;
3327         } else {
3328                 old_key_type = conn ? conn->key_type : 0xff;
3329                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3330                 if (!key)
3331                         return NULL;
3332                 list_add_rcu(&key->list, &hdev->link_keys);
3333         }
3334
3335         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3336
3337         /* Some buggy controller combinations generate a changed
3338          * combination key for legacy pairing even when there's no
3339          * previous key */
3340         if (type == HCI_LK_CHANGED_COMBINATION &&
3341             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3342                 type = HCI_LK_COMBINATION;
3343                 if (conn)
3344                         conn->key_type = type;
3345         }
3346
3347         bacpy(&key->bdaddr, bdaddr);
3348         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3349         key->pin_len = pin_len;
3350
3351         if (type == HCI_LK_CHANGED_COMBINATION)
3352                 key->type = old_key_type;
3353         else
3354                 key->type = type;
3355
3356         if (persistent)
3357                 *persistent = hci_persistent_key(hdev, conn, type,
3358                                                  old_key_type);
3359
3360         return key;
3361 }
3362
3363 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3364                             u8 addr_type, u8 type, u8 authenticated,
3365                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3366 {
3367         struct smp_ltk *key, *old_key;
3368         u8 role = ltk_role(type);
3369
3370         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3371         if (old_key)
3372                 key = old_key;
3373         else {
3374                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3375                 if (!key)
3376                         return NULL;
3377                 list_add_rcu(&key->list, &hdev->long_term_keys);
3378         }
3379
3380         bacpy(&key->bdaddr, bdaddr);
3381         key->bdaddr_type = addr_type;
3382         memcpy(key->val, tk, sizeof(key->val));
3383         key->authenticated = authenticated;
3384         key->ediv = ediv;
3385         key->rand = rand;
3386         key->enc_size = enc_size;
3387         key->type = type;
3388
3389         return key;
3390 }
3391
3392 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3393                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3394 {
3395         struct smp_irk *irk;
3396
3397         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3398         if (!irk) {
3399                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3400                 if (!irk)
3401                         return NULL;
3402
3403                 bacpy(&irk->bdaddr, bdaddr);
3404                 irk->addr_type = addr_type;
3405
3406                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3407         }
3408
3409         memcpy(irk->val, val, 16);
3410         bacpy(&irk->rpa, rpa);
3411
3412         return irk;
3413 }
3414
3415 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3416 {
3417         struct link_key *key;
3418
3419         key = hci_find_link_key(hdev, bdaddr);
3420         if (!key)
3421                 return -ENOENT;
3422
3423         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3424
3425         list_del_rcu(&key->list);
3426         kfree_rcu(key, rcu);
3427
3428         return 0;
3429 }
3430
3431 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3432 {
3433         struct smp_ltk *k;
3434         int removed = 0;
3435
3436         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3437                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3438                         continue;
3439
3440                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3441
3442                 list_del_rcu(&k->list);
3443                 kfree_rcu(k, rcu);
3444                 removed++;
3445         }
3446
3447         return removed ? 0 : -ENOENT;
3448 }
3449
3450 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3451 {
3452         struct smp_irk *k;
3453
3454         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3455                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3456                         continue;
3457
3458                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3459
3460                 list_del_rcu(&k->list);
3461                 kfree_rcu(k, rcu);
3462         }
3463 }
3464
3465 /* HCI command timer function */
3466 static void hci_cmd_timeout(struct work_struct *work)
3467 {
3468         struct hci_dev *hdev = container_of(work, struct hci_dev,
3469                                             cmd_timer.work);
3470
3471         if (hdev->sent_cmd) {
3472                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3473                 u16 opcode = __le16_to_cpu(sent->opcode);
3474
3475                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3476         } else {
3477                 BT_ERR("%s command tx timeout", hdev->name);
3478         }
3479
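        /* Reset the command credit so hci_cmd_work() can submit the
         * next queued command instead of stalling behind the one that
         * timed out.
         */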
3480         atomic_set(&hdev->cmd_cnt, 1);
3481         queue_work(hdev->workqueue, &hdev->cmd_work);
3482 }
3483
3484 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3485                                           bdaddr_t *bdaddr, u8 bdaddr_type)
3486 {
3487         struct oob_data *data;
3488
3489         list_for_each_entry(data, &hdev->remote_oob_data, list) {
3490                 if (bacmp(bdaddr, &data->bdaddr) != 0)
3491                         continue;
3492                 if (data->bdaddr_type != bdaddr_type)
3493                         continue;
3494                 return data;
3495         }
3496
3497         return NULL;
3498 }
3499
3500 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3501                                u8 bdaddr_type)
3502 {
3503         struct oob_data *data;
3504
3505         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3506         if (!data)
3507                 return -ENOENT;
3508
3509         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3510
3511         list_del(&data->list);
3512         kfree(data);
3513
3514         return 0;
3515 }
3516
3517 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3518 {
3519         struct oob_data *data, *n;
3520
3521         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3522                 list_del(&data->list);
3523                 kfree(data);
3524         }
3525 }
3526
3527 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3528                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
3529                             u8 *hash256, u8 *rand256)
3530 {
3531         struct oob_data *data;
3532
3533         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3534         if (!data) {
3535                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3536                 if (!data)
3537                         return -ENOMEM;
3538
3539                 bacpy(&data->bdaddr, bdaddr);
3540                 data->bdaddr_type = bdaddr_type;
3541                 list_add(&data->list, &hdev->remote_oob_data);
3542         }
3543
3544         if (hash192 && rand192) {
3545                 memcpy(data->hash192, hash192, sizeof(data->hash192));
3546                 memcpy(data->rand192, rand192, sizeof(data->rand192));
3547         } else {
3548                 memset(data->hash192, 0, sizeof(data->hash192));
3549                 memset(data->rand192, 0, sizeof(data->rand192));
3550         }
3551
3552         if (hash256 && rand256) {
3553                 memcpy(data->hash256, hash256, sizeof(data->hash256));
3554                 memcpy(data->rand256, rand256, sizeof(data->rand256));
3555         } else {
3556                 memset(data->hash256, 0, sizeof(data->hash256));
3557                 memset(data->rand256, 0, sizeof(data->rand256));
3558         }
3559
3560         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3561
3562         return 0;
3563 }
3564
3565 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3566                                          bdaddr_t *bdaddr, u8 type)
3567 {
3568         struct bdaddr_list *b;
3569
3570         list_for_each_entry(b, bdaddr_list, list) {
3571                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3572                         return b;
3573         }
3574
3575         return NULL;
3576 }
3577
3578 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3579 {
3580         struct list_head *p, *n;
3581
3582         list_for_each_safe(p, n, bdaddr_list) {
3583                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3584
3585                 list_del(p);
3586                 kfree(b);
3587         }
3588 }
3589
3590 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3591 {
3592         struct bdaddr_list *entry;
3593
3594         if (!bacmp(bdaddr, BDADDR_ANY))
3595                 return -EBADF;
3596
3597         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3598                 return -EEXIST;
3599
3600         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3601         if (!entry)
3602                 return -ENOMEM;
3603
3604         bacpy(&entry->bdaddr, bdaddr);
3605         entry->bdaddr_type = type;
3606
3607         list_add(&entry->list, list);
3608
3609         return 0;
3610 }
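
/* Illustrative usage, assuming the caller holds hdev->lock as the
 * management interface does for the white and black lists:
 *
 *     err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *     if (err && err != -EEXIST)
 *             ... adding failed ...
 */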
3611
3612 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3613 {
3614         struct bdaddr_list *entry;
3615
3616         if (!bacmp(bdaddr, BDADDR_ANY)) {
3617                 hci_bdaddr_list_clear(list);
3618                 return 0;
3619         }
3620
3621         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3622         if (!entry)
3623                 return -ENOENT;
3624
3625         list_del(&entry->list);
3626         kfree(entry);
3627
3628         return 0;
3629 }
3630
3631 /* This function requires the caller holds hdev->lock */
3632 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3633                                                bdaddr_t *addr, u8 addr_type)
3634 {
3635         struct hci_conn_params *params;
3636
3637         /* The conn params list only contains identity addresses */
3638         if (!hci_is_identity_address(addr, addr_type))
3639                 return NULL;
3640
3641         list_for_each_entry(params, &hdev->le_conn_params, list) {
3642                 if (bacmp(&params->addr, addr) == 0 &&
3643                     params->addr_type == addr_type) {
3644                         return params;
3645                 }
3646         }
3647
3648         return NULL;
3649 }
3650
3651 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3652 {
3653         struct hci_conn *conn;
3654
3655         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3656         if (!conn)
3657                 return false;
3658
3659         if (conn->dst_type != type)
3660                 return false;
3661
3662         if (conn->state != BT_CONNECTED)
3663                 return false;
3664
3665         return true;
3666 }
3667
3668 /* This function requires the caller holds hdev->lock */
3669 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3670                                                   bdaddr_t *addr, u8 addr_type)
3671 {
3672         struct hci_conn_params *param;
3673
3674         /* The list only contains identity addresses */
3675         if (!hci_is_identity_address(addr, addr_type))
3676                 return NULL;
3677
3678         list_for_each_entry(param, list, action) {
3679                 if (bacmp(&param->addr, addr) == 0 &&
3680                     param->addr_type == addr_type)
3681                         return param;
3682         }
3683
3684         return NULL;
3685 }
3686
3687 /* This function requires the caller holds hdev->lock */
3688 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3689                                             bdaddr_t *addr, u8 addr_type)
3690 {
3691         struct hci_conn_params *params;
3692
3693         if (!hci_is_identity_address(addr, addr_type))
3694                 return NULL;
3695
3696         params = hci_conn_params_lookup(hdev, addr, addr_type);
3697         if (params)
3698                 return params;
3699
3700         params = kzalloc(sizeof(*params), GFP_KERNEL);
3701         if (!params) {
3702                 BT_ERR("Out of memory");
3703                 return NULL;
3704         }
3705
3706         bacpy(&params->addr, addr);
3707         params->addr_type = addr_type;
3708
3709         list_add(&params->list, &hdev->le_conn_params);
3710         INIT_LIST_HEAD(&params->action);
3711
3712         params->conn_min_interval = hdev->le_conn_min_interval;
3713         params->conn_max_interval = hdev->le_conn_max_interval;
3714         params->conn_latency = hdev->le_conn_latency;
3715         params->supervision_timeout = hdev->le_supv_timeout;
3716         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3717
3718         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3719
3720         return params;
3721 }
3722
3723 /* This function requires the caller holds hdev->lock */
3724 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3725                         u8 auto_connect)
3726 {
3727         struct hci_conn_params *params;
3728
3729         params = hci_conn_params_add(hdev, addr, addr_type);
3730         if (!params)
3731                 return -EIO;
3732
3733         if (params->auto_connect == auto_connect)
3734                 return 0;
3735
3736         list_del_init(&params->action);
3737
3738         switch (auto_connect) {
3739         case HCI_AUTO_CONN_DISABLED:
3740         case HCI_AUTO_CONN_LINK_LOSS:
3741                 hci_update_background_scan(hdev);
3742                 break;
3743         case HCI_AUTO_CONN_REPORT:
3744                 list_add(&params->action, &hdev->pend_le_reports);
3745                 hci_update_background_scan(hdev);
3746                 break;
3747         case HCI_AUTO_CONN_DIRECT:
3748         case HCI_AUTO_CONN_ALWAYS:
3749                 if (!is_connected(hdev, addr, addr_type)) {
3750                         list_add(&params->action, &hdev->pend_le_conns);
3751                         hci_update_background_scan(hdev);
3752                 }
3753                 break;
3754         }
3755
3756         params->auto_connect = auto_connect;
3757
3758         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3759                auto_connect);
3760
3761         return 0;
3762 }
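
/* Illustrative only: asking for a known LE device to be reconnected
 * whenever it becomes available, with hdev->lock held as required:
 *
 *     hci_dev_lock(hdev);
 *     err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *                               HCI_AUTO_CONN_ALWAYS);
 *     hci_dev_unlock(hdev);
 */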
3763
3764 static void hci_conn_params_free(struct hci_conn_params *params)
3765 {
3766         if (params->conn) {
3767                 hci_conn_drop(params->conn);
3768                 hci_conn_put(params->conn);
3769         }
3770
3771         list_del(&params->action);
3772         list_del(&params->list);
3773         kfree(params);
3774 }
3775
3776 /* This function requires the caller holds hdev->lock */
3777 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3778 {
3779         struct hci_conn_params *params;
3780
3781         params = hci_conn_params_lookup(hdev, addr, addr_type);
3782         if (!params)
3783                 return;
3784
3785         hci_conn_params_free(params);
3786
3787         hci_update_background_scan(hdev);
3788
3789         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3790 }
3791
3792 /* This function requires the caller holds hdev->lock */
3793 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3794 {
3795         struct hci_conn_params *params, *tmp;
3796
3797         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3798                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3799                         continue;
3800                 list_del(&params->list);
3801                 kfree(params);
3802         }
3803
3804         BT_DBG("All disabled LE connection parameters were removed");
3805 }
3806
3807 /* This function requires the caller holds hdev->lock */
3808 void hci_conn_params_clear_all(struct hci_dev *hdev)
3809 {
3810         struct hci_conn_params *params, *tmp;
3811
3812         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3813                 hci_conn_params_free(params);
3814
3815         hci_update_background_scan(hdev);
3816
3817         BT_DBG("All LE connection parameters were removed");
3818 }
3819
3820 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3821 {
3822         if (status) {
3823                 BT_ERR("Failed to start inquiry: status %d", status);
3824
3825                 hci_dev_lock(hdev);
3826                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3827                 hci_dev_unlock(hdev);
3828                 return;
3829         }
3830 }
3831
3832 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3833 {
3834         /* General inquiry access code (GIAC) */
3835         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3836         struct hci_request req;
3837         struct hci_cp_inquiry cp;
3838         int err;
3839
3840         if (status) {
3841                 BT_ERR("Failed to disable LE scanning: status %d", status);
3842                 return;
3843         }
3844
3845         switch (hdev->discovery.type) {
3846         case DISCOV_TYPE_LE:
3847                 hci_dev_lock(hdev);
3848                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3849                 hci_dev_unlock(hdev);
3850                 break;
3851
3852         case DISCOV_TYPE_INTERLEAVED:
3853                 hci_req_init(&req, hdev);
3854
3855                 memset(&cp, 0, sizeof(cp));
3856                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3857                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3858                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3859
3860                 hci_dev_lock(hdev);
3861
3862                 hci_inquiry_cache_flush(hdev);
3863
3864                 err = hci_req_run(&req, inquiry_complete);
3865                 if (err) {
3866                         BT_ERR("Inquiry request failed: err %d", err);
3867                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3868                 }
3869
3870                 hci_dev_unlock(hdev);
3871                 break;
3872         }
3873 }
3874
3875 static void le_scan_disable_work(struct work_struct *work)
3876 {
3877         struct hci_dev *hdev = container_of(work, struct hci_dev,
3878                                             le_scan_disable.work);
3879         struct hci_request req;
3880         int err;
3881
3882         BT_DBG("%s", hdev->name);
3883
3884         hci_req_init(&req, hdev);
3885
3886         hci_req_add_le_scan_disable(&req);
3887
3888         err = hci_req_run(&req, le_scan_disable_work_complete);
3889         if (err)
3890                 BT_ERR("Disable LE scanning request failed: err %d", err);
3891 }
3892
3893 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3894 {
3895         struct hci_dev *hdev = req->hdev;
3896
3897         /* If we're advertising or initiating an LE connection we can't
3898          * go ahead and change the random address at this time. This is
3899          * because the eventual initiator address used for the
3900          * subsequently created connection will be undefined (some
3901          * controllers use the new address and others the one we had
3902          * when the operation started).
3903          *
3904          * In this kind of scenario skip the update and let the random
3905          * address be updated at the next cycle.
3906          */
3907         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3908             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3909                 BT_DBG("Deferring random address update");
3910                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3911                 return;
3912         }
3913
3914         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3915 }
3916
3917 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3918                               u8 *own_addr_type)
3919 {
3920         struct hci_dev *hdev = req->hdev;
3921         int err;
3922
3923         /* If privacy is enabled use a resolvable private address. If
3924          * current RPA has expired or there is something other than
3925          * the current RPA in use, then generate a new one.
3926          */
3927         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3928                 int to;
3929
3930                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3931
3932                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3933                     !bacmp(&hdev->random_addr, &hdev->rpa))
3934                         return 0;
3935
3936                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3937                 if (err < 0) {
3938                         BT_ERR("%s failed to generate new RPA", hdev->name);
3939                         return err;
3940                 }
3941
3942                 set_random_addr(req, &hdev->rpa);
3943
3944                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3945                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3946
3947                 return 0;
3948         }
3949
3950         /* If privacy is required but no resolvable private address is
3951          * available, use a non-resolvable private address. This is
3952          * useful for active scanning and non-connectable advertising.
3953          */
3954         if (require_privacy) {
3955                 bdaddr_t urpa;
3956
3957                 get_random_bytes(&urpa, 6);
3958                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3959
3960                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3961                 set_random_addr(req, &urpa);
3962                 return 0;
3963         }
3964
3965         /* If forcing the static address is in use or there is no public
3966          * address, use the static address as the random address (but
3967          * skip the HCI command if the current random address is already
3968          * the static one).
3969          */
3970         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3971             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3972                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3973                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3974                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3975                                     &hdev->static_addr);
3976                 return 0;
3977         }
3978
3979         /* Neither privacy nor static address is being used so use a
3980          * public address.
3981          */
3982         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3983
3984         return 0;
3985 }
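
/* To summarize the ladder above: with HCI_PRIVACY set, use a
 * resolvable private address (regenerated once expired); if the caller
 * requires privacy without it, use a non-resolvable private address;
 * with a forced static address or no public address, use the static
 * random address; otherwise fall back to the public address.
 */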
3986
3987 /* Copy the Identity Address of the controller.
3988  *
3989  * If the controller has a public BD_ADDR, then by default use that one.
3990  * If this is an LE-only controller without a public address, default to
3991  * the static random address.
3992  *
3993  * For debugging purposes it is possible to force controllers with a
3994  * public address to use the static random address instead.
3995  */
3996 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3997                                u8 *bdaddr_type)
3998 {
3999         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
4000             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
4001                 bacpy(bdaddr, &hdev->static_addr);
4002                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
4003         } else {
4004                 bacpy(bdaddr, &hdev->bdaddr);
4005                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
4006         }
4007 }
4008
4009 /* Alloc HCI device */
4010 struct hci_dev *hci_alloc_dev(void)
4011 {
4012         struct hci_dev *hdev;
4013
4014         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
4015         if (!hdev)
4016                 return NULL;
4017
4018         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4019         hdev->esco_type = (ESCO_HV1);
4020         hdev->link_mode = (HCI_LM_ACCEPT);
4021         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
4022         hdev->io_capability = 0x03;     /* No Input No Output */
4023         hdev->manufacturer = 0xffff;    /* Default to internal use */
4024         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4025         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
4026
4027         hdev->sniff_max_interval = 800;
4028         hdev->sniff_min_interval = 80;
4029
4030         hdev->le_adv_channel_map = 0x07;
4031         hdev->le_adv_min_interval = 0x0800;
4032         hdev->le_adv_max_interval = 0x0800;
4033         hdev->le_scan_interval = 0x0060;
4034         hdev->le_scan_window = 0x0030;
4035         hdev->le_conn_min_interval = 0x0028;
4036         hdev->le_conn_max_interval = 0x0038;
4037         hdev->le_conn_latency = 0x0000;
4038         hdev->le_supv_timeout = 0x002a;
4039
4040         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4041         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4042         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4043         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4044
4045         mutex_init(&hdev->lock);
4046         mutex_init(&hdev->req_lock);
4047
4048         INIT_LIST_HEAD(&hdev->mgmt_pending);
4049         INIT_LIST_HEAD(&hdev->blacklist);
4050         INIT_LIST_HEAD(&hdev->whitelist);
4051         INIT_LIST_HEAD(&hdev->uuids);
4052         INIT_LIST_HEAD(&hdev->link_keys);
4053         INIT_LIST_HEAD(&hdev->long_term_keys);
4054         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4055         INIT_LIST_HEAD(&hdev->remote_oob_data);
4056         INIT_LIST_HEAD(&hdev->le_white_list);
4057         INIT_LIST_HEAD(&hdev->le_conn_params);
4058         INIT_LIST_HEAD(&hdev->pend_le_conns);
4059         INIT_LIST_HEAD(&hdev->pend_le_reports);
4060         INIT_LIST_HEAD(&hdev->conn_hash.list);
4061
4062         INIT_WORK(&hdev->rx_work, hci_rx_work);
4063         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4064         INIT_WORK(&hdev->tx_work, hci_tx_work);
4065         INIT_WORK(&hdev->power_on, hci_power_on);
4066
4067         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4068         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4069         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4070
4071         skb_queue_head_init(&hdev->rx_q);
4072         skb_queue_head_init(&hdev->cmd_q);
4073         skb_queue_head_init(&hdev->raw_q);
4074
4075         init_waitqueue_head(&hdev->req_wait_q);
4076
4077         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4078
4079         hci_init_sysfs(hdev);
4080         discovery_init(hdev);
4081
4082         return hdev;
4083 }
4084 EXPORT_SYMBOL(hci_alloc_dev);
4085
4086 /* Free HCI device */
4087 void hci_free_dev(struct hci_dev *hdev)
4088 {
4089         /* Will be freed via the device release callback */
4090         put_device(&hdev->dev);
4091 }
4092 EXPORT_SYMBOL(hci_free_dev);
4093
4094 /* Register HCI device */
4095 int hci_register_dev(struct hci_dev *hdev)
4096 {
4097         int id, error;
4098
4099         if (!hdev->open || !hdev->close || !hdev->send)
4100                 return -EINVAL;
4101
4102         /* Do not allow HCI_AMP devices to register at index 0,
4103          * so the index can be used as the AMP controller ID.
4104          */
4105         switch (hdev->dev_type) {
4106         case HCI_BREDR:
4107                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4108                 break;
4109         case HCI_AMP:
4110                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4111                 break;
4112         default:
4113                 return -EINVAL;
4114         }
4115
4116         if (id < 0)
4117                 return id;
4118
4119         sprintf(hdev->name, "hci%d", id);
4120         hdev->id = id;
4121
4122         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4123
4124         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4125                                           WQ_MEM_RECLAIM, 1, hdev->name);
4126         if (!hdev->workqueue) {
4127                 error = -ENOMEM;
4128                 goto err;
4129         }
4130
4131         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4132                                               WQ_MEM_RECLAIM, 1, hdev->name);
4133         if (!hdev->req_workqueue) {
4134                 destroy_workqueue(hdev->workqueue);
4135                 error = -ENOMEM;
4136                 goto err;
4137         }
4138
4139         if (!IS_ERR_OR_NULL(bt_debugfs))
4140                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4141
4142         dev_set_name(&hdev->dev, "%s", hdev->name);
4143
4144         error = device_add(&hdev->dev);
4145         if (error < 0)
4146                 goto err_wqueue;
4147
4148         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4149                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4150                                     hdev);
4151         if (hdev->rfkill) {
4152                 if (rfkill_register(hdev->rfkill) < 0) {
4153                         rfkill_destroy(hdev->rfkill);
4154                         hdev->rfkill = NULL;
4155                 }
4156         }
4157
4158         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4159                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4160
4161         set_bit(HCI_SETUP, &hdev->dev_flags);
4162         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4163
4164         if (hdev->dev_type == HCI_BREDR) {
4165                 /* Assume BR/EDR support until proven otherwise (such as
4166                  * through reading supported features during init).
4167                  */
4168                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4169         }
4170
4171         write_lock(&hci_dev_list_lock);
4172         list_add(&hdev->list, &hci_dev_list);
4173         write_unlock(&hci_dev_list_lock);
4174
4175         /* Devices that are marked for raw-only usage are unconfigured
4176          * and should not be included in normal operation.
4177          */
4178         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4179                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4180
4181         hci_notify(hdev, HCI_DEV_REG);
4182         hci_dev_hold(hdev);
4183
4184         queue_work(hdev->req_workqueue, &hdev->power_on);
4185
4186         return id;
4187
4188 err_wqueue:
4189         destroy_workqueue(hdev->workqueue);
4190         destroy_workqueue(hdev->req_workqueue);
4191 err:
4192         ida_simple_remove(&hci_index_ida, hdev->id);
4193
4194         return error;
4195 }
4196 EXPORT_SYMBOL(hci_register_dev);
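
/* A minimal driver-side sketch (illustrative; my_open, my_close and
 * my_send stand in for a real transport's callbacks):
 *
 *     struct hci_dev *hdev = hci_alloc_dev();
 *
 *     if (!hdev)
 *             return -ENOMEM;
 *     hdev->open  = my_open;
 *     hdev->close = my_close;
 *     hdev->send  = my_send;
 *     err = hci_register_dev(hdev);
 *     if (err < 0)
 *             hci_free_dev(hdev);
 */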
4197
4198 /* Unregister HCI device */
4199 void hci_unregister_dev(struct hci_dev *hdev)
4200 {
4201         int i, id;
4202
4203         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4204
4205         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4206
4207         id = hdev->id;
4208
4209         write_lock(&hci_dev_list_lock);
4210         list_del(&hdev->list);
4211         write_unlock(&hci_dev_list_lock);
4212
4213         hci_dev_do_close(hdev);
4214
4215         for (i = 0; i < NUM_REASSEMBLY; i++)
4216                 kfree_skb(hdev->reassembly[i]);
4217
4218         cancel_work_sync(&hdev->power_on);
4219
4220         if (!test_bit(HCI_INIT, &hdev->flags) &&
4221             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4222             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4223                 hci_dev_lock(hdev);
4224                 mgmt_index_removed(hdev);
4225                 hci_dev_unlock(hdev);
4226         }
4227
4228         /* mgmt_index_removed should take care of emptying the
4229          * pending list */
4230         BUG_ON(!list_empty(&hdev->mgmt_pending));
4231
4232         hci_notify(hdev, HCI_DEV_UNREG);
4233
4234         if (hdev->rfkill) {
4235                 rfkill_unregister(hdev->rfkill);
4236                 rfkill_destroy(hdev->rfkill);
4237         }
4238
4239         smp_unregister(hdev);
4240
4241         device_del(&hdev->dev);
4242
4243         debugfs_remove_recursive(hdev->debugfs);
4244
4245         destroy_workqueue(hdev->workqueue);
4246         destroy_workqueue(hdev->req_workqueue);
4247
4248         hci_dev_lock(hdev);
4249         hci_bdaddr_list_clear(&hdev->blacklist);
4250         hci_bdaddr_list_clear(&hdev->whitelist);
4251         hci_uuids_clear(hdev);
4252         hci_link_keys_clear(hdev);
4253         hci_smp_ltks_clear(hdev);
4254         hci_smp_irks_clear(hdev);
4255         hci_remote_oob_data_clear(hdev);
4256         hci_bdaddr_list_clear(&hdev->le_white_list);
4257         hci_conn_params_clear_all(hdev);
4258         hci_discovery_filter_clear(hdev);
4259         hci_dev_unlock(hdev);
4260
4261         hci_dev_put(hdev);
4262
4263         ida_simple_remove(&hci_index_ida, id);
4264 }
4265 EXPORT_SYMBOL(hci_unregister_dev);
4266
4267 /* Suspend HCI device */
4268 int hci_suspend_dev(struct hci_dev *hdev)
4269 {
4270         hci_notify(hdev, HCI_DEV_SUSPEND);
4271         return 0;
4272 }
4273 EXPORT_SYMBOL(hci_suspend_dev);
4274
4275 /* Resume HCI device */
4276 int hci_resume_dev(struct hci_dev *hdev)
4277 {
4278         hci_notify(hdev, HCI_DEV_RESUME);
4279         return 0;
4280 }
4281 EXPORT_SYMBOL(hci_resume_dev);
4282
4283 /* Reset HCI device */
4284 int hci_reset_dev(struct hci_dev *hdev)
4285 {
4286         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4287         struct sk_buff *skb;
4288
4289         skb = bt_skb_alloc(3, GFP_ATOMIC);
4290         if (!skb)
4291                 return -ENOMEM;
4292
4293         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4294         memcpy(skb_put(skb, 3), hw_err, 3);
4295
4296         /* Send Hardware Error to upper stack */
4297         return hci_recv_frame(hdev, skb);
4298 }
4299 EXPORT_SYMBOL(hci_reset_dev);
4300
4301 /* Receive frame from HCI drivers */
4302 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4303 {
4304         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4305                       !test_bit(HCI_INIT, &hdev->flags))) {
4306                 kfree_skb(skb);
4307                 return -ENXIO;
4308         }
4309
4310         /* Incoming skb */
4311         bt_cb(skb)->incoming = 1;
4312
4313         /* Time stamp */
4314         __net_timestamp(skb);
4315
4316         skb_queue_tail(&hdev->rx_q, skb);
4317         queue_work(hdev->workqueue, &hdev->rx_work);
4318
4319         return 0;
4320 }
4321 EXPORT_SYMBOL(hci_recv_frame);
4322
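/* Reassemble a partially received frame of the given packet type.
 * Returns the number of input bytes left unconsumed (a complete frame
 * was handed to hci_recv_frame() and a new frame may start), or a
 * negative error for invalid packet types or allocation failures.
 */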
4323 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4324                           int count, __u8 index)
4325 {
4326         int len = 0;
4327         int hlen = 0;
4328         int remain = count;
4329         struct sk_buff *skb;
4330         struct bt_skb_cb *scb;
4331
4332         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4333             index >= NUM_REASSEMBLY)
4334                 return -EILSEQ;
4335
4336         skb = hdev->reassembly[index];
4337
4338         if (!skb) {
4339                 switch (type) {
4340                 case HCI_ACLDATA_PKT:
4341                         len = HCI_MAX_FRAME_SIZE;
4342                         hlen = HCI_ACL_HDR_SIZE;
4343                         break;
4344                 case HCI_EVENT_PKT:
4345                         len = HCI_MAX_EVENT_SIZE;
4346                         hlen = HCI_EVENT_HDR_SIZE;
4347                         break;
4348                 case HCI_SCODATA_PKT:
4349                         len = HCI_MAX_SCO_SIZE;
4350                         hlen = HCI_SCO_HDR_SIZE;
4351                         break;
4352                 }
4353
4354                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4355                 if (!skb)
4356                         return -ENOMEM;
4357
4358                 scb = (void *) skb->cb;
4359                 scb->expect = hlen;
4360                 scb->pkt_type = type;
4361
4362                 hdev->reassembly[index] = skb;
4363         }
4364
4365         while (count) {
4366                 scb = (void *) skb->cb;
4367                 len = min_t(uint, scb->expect, count);
4368
4369                 memcpy(skb_put(skb, len), data, len);
4370
4371                 count -= len;
4372                 data += len;
4373                 scb->expect -= len;
4374                 remain = count;
4375
4376                 switch (type) {
4377                 case HCI_EVENT_PKT:
4378                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4379                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4380                                 scb->expect = h->plen;
4381
4382                                 if (skb_tailroom(skb) < scb->expect) {
4383                                         kfree_skb(skb);
4384                                         hdev->reassembly[index] = NULL;
4385                                         return -ENOMEM;
4386                                 }
4387                         }
4388                         break;
4389
4390                 case HCI_ACLDATA_PKT:
4391                         if (skb->len == HCI_ACL_HDR_SIZE) {
4392                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4393                                 scb->expect = __le16_to_cpu(h->dlen);
4394
4395                                 if (skb_tailroom(skb) < scb->expect) {
4396                                         kfree_skb(skb);
4397                                         hdev->reassembly[index] = NULL;
4398                                         return -ENOMEM;
4399                                 }
4400                         }
4401                         break;
4402
4403                 case HCI_SCODATA_PKT:
4404                         if (skb->len == HCI_SCO_HDR_SIZE) {
4405                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4406                                 scb->expect = h->dlen;
4407
4408                                 if (skb_tailroom(skb) < scb->expect) {
4409                                         kfree_skb(skb);
4410                                         hdev->reassembly[index] = NULL;
4411                                         return -ENOMEM;
4412                                 }
4413                         }
4414                         break;
4415                 }
4416
4417                 if (scb->expect == 0) {
4418                         /* Complete frame */
4419
4420                         bt_cb(skb)->pkt_type = type;
4421                         hci_recv_frame(hdev, skb);
4422
4423                         hdev->reassembly[index] = NULL;
4424                         return remain;
4425                 }
4426         }
4427
4428         return remain;
4429 }
4430
4431 #define STREAM_REASSEMBLY 0
4432
4433 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4434 {
4435         int type;
4436         int rem = 0;
4437
4438         while (count) {
4439                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4440
4441                 if (!skb) {
4442                         struct { char type; } *pkt;
4443
4444                         /* Start of the frame */
4445                         pkt = data;
4446                         type = pkt->type;
4447
4448                         data++;
4449                         count--;
4450                 } else
4451                         type = bt_cb(skb)->pkt_type;
4452
4453                 rem = hci_reassembly(hdev, type, data, count,
4454                                      STREAM_REASSEMBLY);
4455                 if (rem < 0)
4456                         return rem;
4457
4458                 data += (count - rem);
4459                 count = rem;
4460         }
4461
4462         return rem;
4463 }
4464 EXPORT_SYMBOL(hci_recv_stream_fragment);
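
/* Illustrative only: a UART-style driver can feed raw bytes straight
 * from its receive path; the first byte of every frame is the packet
 * type indicator consumed above:
 *
 *     count = hci_recv_stream_fragment(hdev, buf, len);
 *     if (count < 0)
 *             ... stream is corrupt, reset the transport ...
 */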
4465
4466 /* ---- Interface to upper protocols ---- */
4467
4468 int hci_register_cb(struct hci_cb *cb)
4469 {
4470         BT_DBG("%p name %s", cb, cb->name);
4471
4472         write_lock(&hci_cb_list_lock);
4473         list_add(&cb->list, &hci_cb_list);
4474         write_unlock(&hci_cb_list_lock);
4475
4476         return 0;
4477 }
4478 EXPORT_SYMBOL(hci_register_cb);
4479
4480 int hci_unregister_cb(struct hci_cb *cb)
4481 {
4482         BT_DBG("%p name %s", cb, cb->name);
4483
4484         write_lock(&hci_cb_list_lock);
4485         list_del(&cb->list);
4486         write_unlock(&hci_cb_list_lock);
4487
4488         return 0;
4489 }
4490 EXPORT_SYMBOL(hci_unregister_cb);
4491
4492 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4493 {
4494         int err;
4495
4496         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4497
4498         /* Time stamp */
4499         __net_timestamp(skb);
4500
4501         /* Send copy to monitor */
4502         hci_send_to_monitor(hdev, skb);
4503
4504         if (atomic_read(&hdev->promisc)) {
4505                 /* Send copy to the sockets */
4506                 hci_send_to_sock(hdev, skb);
4507         }
4508
4509         /* Get rid of skb owner, prior to sending to the driver. */
4510         skb_orphan(skb);
4511
4512         err = hdev->send(hdev, skb);
4513         if (err < 0) {
4514                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4515                 kfree_skb(skb);
4516         }
4517 }
4518
4519 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4520 {
4521         skb_queue_head_init(&req->cmd_q);
4522         req->hdev = hdev;
4523         req->err = 0;
4524 }
4525
4526 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4527 {
4528         struct hci_dev *hdev = req->hdev;
4529         struct sk_buff *skb;
4530         unsigned long flags;
4531
4532         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4533
4534         /* If an error occurred during request building, remove all HCI
4535          * commands queued on the HCI request queue.
4536          */
4537         if (req->err) {
4538                 skb_queue_purge(&req->cmd_q);
4539                 return req->err;
4540         }
4541
4542         /* Do not allow empty requests */
4543         if (skb_queue_empty(&req->cmd_q))
4544                 return -ENODATA;
4545
4546         skb = skb_peek_tail(&req->cmd_q);
4547         bt_cb(skb)->req.complete = complete;
4548
4549         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4550         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4551         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4552
4553         queue_work(hdev->workqueue, &hdev->cmd_work);
4554
4555         return 0;
4556 }
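
/* Putting request building and submission together, mirroring
 * le_scan_disable_work() above (my_complete_cb stands in for any
 * hci_req_complete_t callback):
 *
 *     struct hci_request req;
 *
 *     hci_req_init(&req, hdev);
 *     hci_req_add_le_scan_disable(&req);
 *     err = hci_req_run(&req, my_complete_cb);
 *     if (err)
 *             ... nothing was queued ...
 */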
4557
4558 bool hci_req_pending(struct hci_dev *hdev)
4559 {
4560         return (hdev->req_status == HCI_REQ_PEND);
4561 }
4562
4563 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4564                                        u32 plen, const void *param)
4565 {
4566         int len = HCI_COMMAND_HDR_SIZE + plen;
4567         struct hci_command_hdr *hdr;
4568         struct sk_buff *skb;
4569
4570         skb = bt_skb_alloc(len, GFP_ATOMIC);
4571         if (!skb)
4572                 return NULL;
4573
4574         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4575         hdr->opcode = cpu_to_le16(opcode);
4576         hdr->plen   = plen;
4577
4578         if (plen)
4579                 memcpy(skb_put(skb, plen), param, plen);
4580
4581         BT_DBG("skb len %d", skb->len);
4582
4583         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4584         bt_cb(skb)->opcode = opcode;
4585
4586         return skb;
4587 }
4588
4589 /* Send HCI command */
4590 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4591                  const void *param)
4592 {
4593         struct sk_buff *skb;
4594
4595         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4596
4597         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4598         if (!skb) {
4599                 BT_ERR("%s no memory for command", hdev->name);
4600                 return -ENOMEM;
4601         }
4602
4603         /* Stand-alone HCI commands must be flagged as
4604          * single-command requests.
4605          */
4606         bt_cb(skb)->req.start = true;
4607
4608         skb_queue_tail(&hdev->cmd_q, skb);
4609         queue_work(hdev->workqueue, &hdev->cmd_work);
4610
4611         return 0;
4612 }
4613
4614 /* Queue a command to an asynchronous HCI request */
4615 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4616                     const void *param, u8 event)
4617 {
4618         struct hci_dev *hdev = req->hdev;
4619         struct sk_buff *skb;
4620
4621         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4622
4623         /* If an error occurred during request building, there is no point in
4624          * queueing the HCI command. We can simply return.
4625          */
4626         if (req->err)
4627                 return;
4628
4629         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4630         if (!skb) {
4631                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4632                        hdev->name, opcode);
4633                 req->err = -ENOMEM;
4634                 return;
4635         }
4636
4637         if (skb_queue_empty(&req->cmd_q))
4638                 bt_cb(skb)->req.start = true;
4639
4640         bt_cb(skb)->req.event = event;
4641
4642         skb_queue_tail(&req->cmd_q, skb);
4643 }
4644
4645 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4646                  const void *param)
4647 {
4648         hci_req_add_ev(req, opcode, plen, param, 0);
4649 }
4650
4651 /* Get data from the previously sent command */
4652 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4653 {
4654         struct hci_command_hdr *hdr;
4655
4656         if (!hdev->sent_cmd)
4657                 return NULL;
4658
4659         hdr = (void *) hdev->sent_cmd->data;
4660
4661         if (hdr->opcode != cpu_to_le16(opcode))
4662                 return NULL;
4663
4664         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4665
4666         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4667 }
4668
4669 /* Send ACL data */
4670 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4671 {
4672         struct hci_acl_hdr *hdr;
4673         int len = skb->len;
4674
4675         skb_push(skb, HCI_ACL_HDR_SIZE);
4676         skb_reset_transport_header(skb);
4677         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4678         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4679         hdr->dlen   = cpu_to_le16(len);
4680 }
4681
4682 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4683                           struct sk_buff *skb, __u16 flags)
4684 {
4685         struct hci_conn *conn = chan->conn;
4686         struct hci_dev *hdev = conn->hdev;
4687         struct sk_buff *list;
4688
4689         skb->len = skb_headlen(skb);
4690         skb->data_len = 0;
4691
4692         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4693
4694         switch (hdev->dev_type) {
4695         case HCI_BREDR:
4696                 hci_add_acl_hdr(skb, conn->handle, flags);
4697                 break;
4698         case HCI_AMP:
4699                 hci_add_acl_hdr(skb, chan->handle, flags);
4700                 break;
4701         default:
4702                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4703                 return;
4704         }
4705
4706         list = skb_shinfo(skb)->frag_list;
4707         if (!list) {
4708                 /* Non-fragmented */
4709                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4710
4711                 skb_queue_tail(queue, skb);
4712         } else {
4713                 /* Fragmented */
4714                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4715
4716                 skb_shinfo(skb)->frag_list = NULL;
4717
4718                 /* Queue all fragments atomically. We need to use
4719                  * spin_lock_bh here because of 6LoWPAN links, where this
4720                  * function is called from softirq context; a plain spin
4721                  * lock there could deadlock.
4722                  */
4723                 spin_lock_bh(&queue->lock);
4724
4725                 __skb_queue_tail(queue, skb);
4726
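                /* The first fragment keeps the caller's flags; all
                 * following fragments are marked as continuations.
                 */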
4727                 flags &= ~ACL_START;
4728                 flags |= ACL_CONT;
4729                 do {
4730                         skb = list; list = list->next;
4731
4732                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4733                         hci_add_acl_hdr(skb, conn->handle, flags);
4734
4735                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4736
4737                         __skb_queue_tail(queue, skb);
4738                 } while (list);
4739
4740                 spin_unlock_bh(&queue->lock);
4741         }
4742 }
4743
4744 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4745 {
4746         struct hci_dev *hdev = chan->conn->hdev;
4747
4748         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4749
4750         hci_queue_acl(chan, &chan->data_q, skb, flags);
4751
4752         queue_work(hdev->workqueue, &hdev->tx_work);
4753 }
4754
4755 /* Send SCO data */
4756 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4757 {
4758         struct hci_dev *hdev = conn->hdev;
4759         struct hci_sco_hdr hdr;
4760
4761         BT_DBG("%s len %d", hdev->name, skb->len);
4762
4763         hdr.handle = cpu_to_le16(conn->handle);
4764         hdr.dlen   = skb->len;
4765
4766         skb_push(skb, HCI_SCO_HDR_SIZE);
4767         skb_reset_transport_header(skb);
4768         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4769
4770         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4771
4772         skb_queue_tail(&conn->data_q, skb);
4773         queue_work(hdev->workqueue, &hdev->tx_work);
4774 }
4775
4776 /* ---- HCI TX task (outgoing data) ---- */
4777
4778 /* HCI Connection scheduler */
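/* Pick the connection of the given link type with the fewest packets
 * outstanding and grant it an even share of the free controller
 * buffers (at least one), so a busy link cannot starve the others.
 */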
4779 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4780                                      int *quote)
4781 {
4782         struct hci_conn_hash *h = &hdev->conn_hash;
4783         struct hci_conn *conn = NULL, *c;
4784         unsigned int num = 0, min = ~0;
4785
4786         /* We don't have to lock device here. Connections are always
4787          * added and removed with TX task disabled. */
4788
4789         rcu_read_lock();
4790
4791         list_for_each_entry_rcu(c, &h->list, list) {
4792                 if (c->type != type || skb_queue_empty(&c->data_q))
4793                         continue;
4794
4795                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4796                         continue;
4797
4798                 num++;
4799
4800                 if (c->sent < min) {
4801                         min  = c->sent;
4802                         conn = c;
4803                 }
4804
4805                 if (hci_conn_num(hdev, type) == num)
4806                         break;
4807         }
4808
4809         rcu_read_unlock();
4810
4811         if (conn) {
4812                 int cnt, q;
4813
4814                 switch (conn->type) {
4815                 case ACL_LINK:
4816                         cnt = hdev->acl_cnt;
4817                         break;
4818                 case SCO_LINK:
4819                 case ESCO_LINK:
4820                         cnt = hdev->sco_cnt;
4821                         break;
4822                 case LE_LINK:
4823                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4824                         break;
4825                 default:
4826                         cnt = 0;
4827                         BT_ERR("Unknown link type");
4828                 }
4829
4830                 q = cnt / num;
4831                 *quote = q ? q : 1;
4832         } else
4833                 *quote = 0;
4834
4835         BT_DBG("conn %p quote %d", conn, *quote);
4836         return conn;
4837 }
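
/* Worked example of the fair quota above: with hdev->acl_cnt == 8 free
 * controller buffers and three ACL connections holding queued data, the
 * least-busy connection (smallest c->sent) is picked and granted
 * q = 8 / 3 = 2 frames. If cnt / num rounds down to zero, the quote is
 * clamped to 1 so that a connection is never starved completely.
 */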

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
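
/* hci_chan_sent() extends hci_low_sent() with per-skb priorities: whenever
 * a queued frame with a higher priority than anything seen so far shows up,
 * the fairness state (num/min) is reset so that only channels at the
 * highest pending priority compete for the quota.
 */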

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
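
/* Note on the recalculation above: channels that transmitted in the last
 * round merely get their counter cleared, while channels that are still
 * waiting have their head frame promoted to HCI_PRIO_MAX - 1. This keeps
 * a continuous stream of high-priority traffic from starving
 * lower-priority channels indefinitely.
 */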

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
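
/* Worked example: with hdev->block_len == 64 and a 134-byte skb (4-byte
 * ACL header plus 130 bytes of payload), DIV_ROUND_UP(130, 64) charges
 * the packet 3 data blocks against hdev->block_cnt.
 */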

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* The ACL tx timeout must be longer than the maximum link
		 * supervision timeout (40.9 seconds).
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
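
/* Walk-through of the packet-based path above: while controller buffers
 * remain (hdev->acl_cnt), the fairest channel is asked for its quota,
 * frames are sent until the quota runs out or a lower-priority frame is
 * reached, and hci_prio_recalculate() rebalances priorities if anything
 * was transmitted.
 */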

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
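
/* Unlike the packet-based scheduler, the block-based path charges each
 * frame __get_blocks() blocks against both the controller budget
 * (hdev->block_cnt) and the channel quota, and bails out early if a frame
 * would need more blocks than are currently available.
 */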

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL links to schedule on a BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP links to schedule on an AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
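
/* Which scheduler runs is fixed by the controller's flow control mode:
 * packet-based accounting counts whole ACL packets (the common case for
 * BR/EDR controllers), while block-based accounting is used by
 * controllers such as AMP that report a data block size instead.
 */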

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
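
/* SCO and eSCO links draw from the same buffer budget (hdev->sco_cnt).
 * The wrap check on conn->sent simply keeps the per-connection counter
 * from overflowing on long-lived audio links.
 */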

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* The LE tx timeout must be longer than the maximum link
		 * supervision timeout (40.9 seconds).
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
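
/* Controllers that expose no dedicated LE buffers (hdev->le_pkts == 0)
 * make LE traffic share the ACL pool, which is why the loop above writes
 * the remaining count back to either hdev->le_cnt or hdev->acl_cnt.
 */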

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	BT_ERR("%s ACL packet for unknown connection handle %d",
	       hdev->name, handle);

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	}

	BT_ERR("%s SCO packet for unknown connection handle %d",
	       hdev->name, handle);

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to handle it specially.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
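
/* Completion walk-through: bt_cb(skb)->req.start marks the first command
 * of each request, so a request is finished once the head of cmd_q starts
 * a new one. On error, every remaining command of the current request is
 * dropped, and the complete callback stored with the request runs exactly
 * once with the failing status.
 */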

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
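
/* hdev->cmd_cnt throttles the command flow: it is decremented here for
 * every command handed to the driver and replenished by Command Complete
 * and Command Status events. cmd_timer catches controllers that never
 * answer, except while a reset is in flight, when the pending timeout is
 * cancelled instead.
 */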

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
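
/* Illustrative sketch (not part of the original file, hence guarded out):
 * the typical build-and-run pattern for the request API used throughout
 * this file. The helper name is hypothetical, and the caller is assumed
 * to hold hdev->lock, as hci_update_background_scan() does.
 */
#if 0
static void example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	/* A NULL complete callback is fine if no status handling is needed */
	err = hci_req_run(&req, NULL);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
#endif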

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Now that all stale white list entries have been removed, walk
	 * through the list of pending connections and ensure that any new
	 * device gets programmed into the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* The white list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* The white list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use the white list */
	return 0x01;
}
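
/* The byte returned above feeds le_set_scan_param.filter_policy directly:
 * 0x00 accepts all advertisements while 0x01 accepts only white-listed
 * devices. Any situation the white list cannot express (an overflowing
 * list or RPA-based peers) therefore falls back to scanning everything
 * and filtering in the host.
 */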

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent during
	 * passive scanning. Not using an unresolvable address here is
	 * important so that peer devices using direct advertising with
	 * our address will be correctly reported by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
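
/* Both commands are queued into one request on purpose: the controller
 * rejects scan parameter changes while scanning is enabled, so the
 * parameters must always be (re)written before LE_SCAN_ENABLE is sent.
 */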

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
		       status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If the controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan
		 * and connect at the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If the controller is currently scanning, we stop it to
		 * ensure we don't miss any advertising (due to the
		 * duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
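
/* Write_Scan_Enable bit values used above: SCAN_DISABLED (0x00) turns both
 * scans off, SCAN_INQUIRY (0x01) enables inquiry scan and SCAN_PAGE (0x02)
 * enables page scan, so SCAN_PAGE | SCAN_INQUIRY (0x03) makes the device
 * both connectable and discoverable.
 */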