Bluetooth: Get MWS transport configuration of the controller
net/bluetooth/hci_core.c (firefly-linux-kernel-4.4.55.git)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

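/* States of a synchronous HCI request, tracked in hdev->req_status: a
 * request is PEND while it is in flight, becomes DONE once
 * hci_req_sync_complete() records its result, and CANCELED when
 * hci_req_cancel() aborts it (see the request helpers below).
 */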
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
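
/* Usage sketch for the file above (illustrative, assuming debugfs is
 * mounted at /sys/kernel/debug and the controller is hci0; the entry
 * itself is created in __hci_init() below during the HCI_SETUP phase):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */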

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored with their bytes
                 * in reverse order, so convert them back into big endian
                 * order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
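
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations glue around the
 * get/set helpers above: reads print the u64 through the given format
 * string and writes parse a number back in. Userspace usage sketch
 * (illustrative value; assumes debugfs at /sys/kernel/debug and that the
 * attribute is registered as bluetooth/hci0/auto_accept_delay, which
 * happens outside this excerpt):
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 *	echo 5000 > /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 */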

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
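
/* Caller sketch for __hci_cmd_sync(), modeled on dut_mode_write() above
 * (hypothetical fragment; callers serialize against each other via
 * hci_req_lock()):
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */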

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
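
/* __hci_req_sync() is the engine behind the staged init sequence further
 * down: __hci_init() calls, e.g., __hci_req_sync(hdev, hci_init1_req, 0,
 * HCI_INIT_TIMEOUT), letting the callback queue up its commands before
 * sleeping until hci_req_sync_complete() fires or the timeout expires.
 */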

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

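/* Pick the value for HCI_OP_WRITE_INQUIRY_MODE below: 0x02 requests
 * extended inquiry results, 0x01 inquiry results with RSSI and 0x00
 * standard inquiry results. The manufacturer/revision checks are quirks
 * for controllers that deliver inquiry results with RSSI without
 * advertising the corresponding LMP feature bit.
 */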
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should be
                 * available as well. However, some controllers report a
                 * max_page of 0 as long as SSP has not been enabled. To get
                 * proper debugging output, force max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If it is not supported, assume that the
         * controller has no actual support for stored link keys, which
         * makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

1693         /* Get MWS transport configuration if the HCI command is supported */
1694         if (hdev->commands[30] & 0x08)
1695                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1696
1697         /* Check for Synchronization Train support */
1698         if (lmp_sync_train_capable(hdev))
1699                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1700
1701         /* Enable Secure Connections if supported and configured */
1702         if ((lmp_sc_capable(hdev) ||
1703              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1704             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1705                 u8 support = 0x01;
1706                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1707                             sizeof(support), &support);
1708         }
1709 }
1710
1711 static int __hci_init(struct hci_dev *hdev)
1712 {
1713         int err;
1714
1715         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1716         if (err < 0)
1717                 return err;
1718
1719         /* The Device Under Test (DUT) mode is special and available for
1720          * all controller types. So just create it early on.
1721          */
1722         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1723                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1724                                     &dut_mode_fops);
1725         }
1726
1727         /* The HCI_BREDR device type covers single-mode LE, single-mode
1728          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1729          * only need the first stage init.
1730          */
1731         if (hdev->dev_type != HCI_BREDR)
1732                 return 0;
1733
1734         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1735         if (err < 0)
1736                 return err;
1737
1738         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1739         if (err < 0)
1740                 return err;
1741
1742         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1743         if (err < 0)
1744                 return err;
1745
1746         /* Only create debugfs entries during the initial setup
1747          * phase and not every time the controller gets powered on.
1748          */
1749         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1750                 return 0;
1751
1752         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1753                             &features_fops);
1754         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1755                            &hdev->manufacturer);
1756         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1757         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1758         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1759                             &blacklist_fops);
1760         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1761                             &whitelist_fops);
1762         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1763
1764         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1765                             &conn_info_min_age_fops);
1766         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1767                             &conn_info_max_age_fops);
1768
1769         if (lmp_bredr_capable(hdev)) {
1770                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1771                                     hdev, &inquiry_cache_fops);
1772                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1773                                     hdev, &link_keys_fops);
1774                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1775                                     hdev, &dev_class_fops);
1776                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1777                                     hdev, &voice_setting_fops);
1778         }
1779
1780         if (lmp_ssp_capable(hdev)) {
1781                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1782                                     hdev, &auto_accept_delay_fops);
1783                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1784                                     hdev, &force_sc_support_fops);
1785                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1786                                     hdev, &sc_only_mode_fops);
1787         }
1788
1789         if (lmp_sniff_capable(hdev)) {
1790                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1791                                     hdev, &idle_timeout_fops);
1792                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1793                                     hdev, &sniff_min_interval_fops);
1794                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1795                                     hdev, &sniff_max_interval_fops);
1796         }
1797
1798         if (lmp_le_capable(hdev)) {
1799                 debugfs_create_file("identity", 0400, hdev->debugfs,
1800                                     hdev, &identity_fops);
1801                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1802                                     hdev, &rpa_timeout_fops);
1803                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1804                                     hdev, &random_address_fops);
1805                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1806                                     hdev, &static_address_fops);
1807
1808                 /* For controllers with a public address, provide a debug
1809                  * option to force the usage of the configured static
1810                  * address. By default the public address is used.
1811                  */
1812                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1813                         debugfs_create_file("force_static_address", 0644,
1814                                             hdev->debugfs, hdev,
1815                                             &force_static_address_fops);
1816
1817                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1818                                   &hdev->le_white_list_size);
1819                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1820                                     &white_list_fops);
1821                 debugfs_create_file("identity_resolving_keys", 0400,
1822                                     hdev->debugfs, hdev,
1823                                     &identity_resolving_keys_fops);
1824                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1825                                     hdev, &long_term_keys_fops);
1826                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1827                                     hdev, &conn_min_interval_fops);
1828                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1829                                     hdev, &conn_max_interval_fops);
1830                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1831                                     hdev, &conn_latency_fops);
1832                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1833                                     hdev, &supervision_timeout_fops);
1834                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1835                                     hdev, &adv_channel_map_fops);
1836                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1837                                     &device_list_fops);
1838                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1839                                    hdev->debugfs,
1840                                    &hdev->discov_interleaved_timeout);
1841         }
1842
1843         return 0;
1844 }
1845
1846 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1847 {
1848         struct hci_dev *hdev = req->hdev;
1849
1850         BT_DBG("%s %ld", hdev->name, opt);
1851
1852         /* Reset */
1853         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1854                 hci_reset_req(req, 0);
1855
1856         /* Read Local Version */
1857         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1858
1859         /* Read BD Address */
1860         if (hdev->set_bdaddr)
1861                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1862 }
1863
1864 static int __hci_unconf_init(struct hci_dev *hdev)
1865 {
1866         int err;
1867
1868         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1869                 return 0;
1870
1871         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1872         if (err < 0)
1873                 return err;
1874
1875         return 0;
1876 }
1877
1878 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1879 {
1880         __u8 scan = opt;
1881
1882         BT_DBG("%s %x", req->hdev->name, scan);
1883
1884         /* Inquiry and Page scans */
1885         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1886 }
1887
1888 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1889 {
1890         __u8 auth = opt;
1891
1892         BT_DBG("%s %x", req->hdev->name, auth);
1893
1894         /* Authentication */
1895         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1896 }
1897
1898 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1899 {
1900         __u8 encrypt = opt;
1901
1902         BT_DBG("%s %x", req->hdev->name, encrypt);
1903
1904         /* Encryption */
1905         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1906 }
1907
1908 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1909 {
1910         __le16 policy = cpu_to_le16(opt);
1911
1912         BT_DBG("%s %x", req->hdev->name, policy);
1913
1914         /* Default link policy */
1915         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1916 }
1917
1918 /* Get HCI device by index.
1919  * Device is held on return. */
1920 struct hci_dev *hci_dev_get(int index)
1921 {
1922         struct hci_dev *hdev = NULL, *d;
1923
1924         BT_DBG("%d", index);
1925
1926         if (index < 0)
1927                 return NULL;
1928
1929         read_lock(&hci_dev_list_lock);
1930         list_for_each_entry(d, &hci_dev_list, list) {
1931                 if (d->id == index) {
1932                         hdev = hci_dev_hold(d);
1933                         break;
1934                 }
1935         }
1936         read_unlock(&hci_dev_list_lock);
1937         return hdev;
1938 }
1939
1940 /* ---- Inquiry support ---- */
1941
1942 bool hci_discovery_active(struct hci_dev *hdev)
1943 {
1944         struct discovery_state *discov = &hdev->discovery;
1945
1946         switch (discov->state) {
1947         case DISCOVERY_FINDING:
1948         case DISCOVERY_RESOLVING:
1949                 return true;
1950
1951         default:
1952                 return false;
1953         }
1954 }
1955
1956 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1957 {
1958         int old_state = hdev->discovery.state;
1959
1960         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1961
1962         if (old_state == state)
1963                 return;
1964
1965         hdev->discovery.state = state;
1966
1967         switch (state) {
1968         case DISCOVERY_STOPPED:
1969                 hci_update_background_scan(hdev);
1970
1971                 if (old_state != DISCOVERY_STARTING)
1972                         mgmt_discovering(hdev, 0);
1973                 break;
1974         case DISCOVERY_STARTING:
1975                 break;
1976         case DISCOVERY_FINDING:
1977                 mgmt_discovering(hdev, 1);
1978                 break;
1979         case DISCOVERY_RESOLVING:
1980                 break;
1981         case DISCOVERY_STOPPING:
1982                 break;
1983         }
1984 }
1985
1986 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1987 {
1988         struct discovery_state *cache = &hdev->discovery;
1989         struct inquiry_entry *p, *n;
1990
1991         list_for_each_entry_safe(p, n, &cache->all, all) {
1992                 list_del(&p->all);
1993                 kfree(p);
1994         }
1995
1996         INIT_LIST_HEAD(&cache->unknown);
1997         INIT_LIST_HEAD(&cache->resolve);
1998 }
1999
2000 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2001                                                bdaddr_t *bdaddr)
2002 {
2003         struct discovery_state *cache = &hdev->discovery;
2004         struct inquiry_entry *e;
2005
2006         BT_DBG("cache %p, %pMR", cache, bdaddr);
2007
2008         list_for_each_entry(e, &cache->all, all) {
2009                 if (!bacmp(&e->data.bdaddr, bdaddr))
2010                         return e;
2011         }
2012
2013         return NULL;
2014 }
2015
2016 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2017                                                        bdaddr_t *bdaddr)
2018 {
2019         struct discovery_state *cache = &hdev->discovery;
2020         struct inquiry_entry *e;
2021
2022         BT_DBG("cache %p, %pMR", cache, bdaddr);
2023
2024         list_for_each_entry(e, &cache->unknown, list) {
2025                 if (!bacmp(&e->data.bdaddr, bdaddr))
2026                         return e;
2027         }
2028
2029         return NULL;
2030 }
2031
2032 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2033                                                        bdaddr_t *bdaddr,
2034                                                        int state)
2035 {
2036         struct discovery_state *cache = &hdev->discovery;
2037         struct inquiry_entry *e;
2038
2039         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2040
2041         list_for_each_entry(e, &cache->resolve, list) {
2042                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2043                         return e;
2044                 if (!bacmp(&e->data.bdaddr, bdaddr))
2045                         return e;
2046         }
2047
2048         return NULL;
2049 }
2050
2051 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2052                                       struct inquiry_entry *ie)
2053 {
2054         struct discovery_state *cache = &hdev->discovery;
2055         struct list_head *pos = &cache->resolve;
2056         struct inquiry_entry *p;
2057
2058         list_del(&ie->list);
2059
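        /* Keep the resolve list ordered by ascending |RSSI| so that the
         * strongest devices (RSSI closest to zero) get their names
         * resolved first; entries with a pending name request are
         * skipped over during insertion.
         */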
2060         list_for_each_entry(p, &cache->resolve, list) {
2061                 if (p->name_state != NAME_PENDING &&
2062                     abs(p->data.rssi) >= abs(ie->data.rssi))
2063                         break;
2064                 pos = &p->list;
2065         }
2066
2067         list_add(&ie->list, pos);
2068 }
2069
2070 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2071                              bool name_known)
2072 {
2073         struct discovery_state *cache = &hdev->discovery;
2074         struct inquiry_entry *ie;
2075         u32 flags = 0;
2076
2077         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2078
2079         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2080
2081         if (!data->ssp_mode)
2082                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2083
2084         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2085         if (ie) {
2086                 if (!ie->data.ssp_mode)
2087                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2088
2089                 if (ie->name_state == NAME_NEEDED &&
2090                     data->rssi != ie->data.rssi) {
2091                         ie->data.rssi = data->rssi;
2092                         hci_inquiry_cache_update_resolve(hdev, ie);
2093                 }
2094
2095                 goto update;
2096         }
2097
2098         /* Entry not in the cache. Add new one. */
2099         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2100         if (!ie) {
2101                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2102                 goto done;
2103         }
2104
2105         list_add(&ie->all, &cache->all);
2106
2107         if (name_known) {
2108                 ie->name_state = NAME_KNOWN;
2109         } else {
2110                 ie->name_state = NAME_NOT_KNOWN;
2111                 list_add(&ie->list, &cache->unknown);
2112         }
2113
2114 update:
2115         if (name_known && ie->name_state != NAME_KNOWN &&
2116             ie->name_state != NAME_PENDING) {
2117                 ie->name_state = NAME_KNOWN;
2118                 list_del(&ie->list);
2119         }
2120
2121         memcpy(&ie->data, data, sizeof(*data));
2122         ie->timestamp = jiffies;
2123         cache->timestamp = jiffies;
2124
2125         if (ie->name_state == NAME_NOT_KNOWN)
2126                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2127
2128 done:
2129         return flags;
2130 }
2131
2132 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2133 {
2134         struct discovery_state *cache = &hdev->discovery;
2135         struct inquiry_info *info = (struct inquiry_info *) buf;
2136         struct inquiry_entry *e;
2137         int copied = 0;
2138
2139         list_for_each_entry(e, &cache->all, all) {
2140                 struct inquiry_data *data = &e->data;
2141
2142                 if (copied >= num)
2143                         break;
2144
2145                 bacpy(&info->bdaddr, &data->bdaddr);
2146                 info->pscan_rep_mode    = data->pscan_rep_mode;
2147                 info->pscan_period_mode = data->pscan_period_mode;
2148                 info->pscan_mode        = data->pscan_mode;
2149                 memcpy(info->dev_class, data->dev_class, 3);
2150                 info->clock_offset      = data->clock_offset;
2151
2152                 info++;
2153                 copied++;
2154         }
2155
2156         BT_DBG("cache %p, copied %d", cache, copied);
2157         return copied;
2158 }
2159
2160 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2161 {
2162         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2163         struct hci_dev *hdev = req->hdev;
2164         struct hci_cp_inquiry cp;
2165
2166         BT_DBG("%s", hdev->name);
2167
2168         if (test_bit(HCI_INQUIRY, &hdev->flags))
2169                 return;
2170
2171         /* Start Inquiry */
2172         memcpy(&cp.lap, &ir->lap, 3);
2173         cp.length  = ir->length;
2174         cp.num_rsp = ir->num_rsp;
2175         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2176 }
2177
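/* wait_on_bit() action helper: sleep until woken up, then report whether
 * a signal is pending so the inquiry wait below can return -EINTR.
 */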
2178 static int wait_inquiry(void *word)
2179 {
2180         schedule();
2181         return signal_pending(current);
2182 }
2183
2184 int hci_inquiry(void __user *arg)
2185 {
2186         __u8 __user *ptr = arg;
2187         struct hci_inquiry_req ir;
2188         struct hci_dev *hdev;
2189         int err = 0, do_inquiry = 0, max_rsp;
2190         long timeo;
2191         __u8 *buf;
2192
2193         if (copy_from_user(&ir, ptr, sizeof(ir)))
2194                 return -EFAULT;
2195
2196         hdev = hci_dev_get(ir.dev_id);
2197         if (!hdev)
2198                 return -ENODEV;
2199
2200         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2201                 err = -EBUSY;
2202                 goto done;
2203         }
2204
2205         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2206                 err = -EOPNOTSUPP;
2207                 goto done;
2208         }
2209
2210         if (hdev->dev_type != HCI_BREDR) {
2211                 err = -EOPNOTSUPP;
2212                 goto done;
2213         }
2214
2215         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2216                 err = -EOPNOTSUPP;
2217                 goto done;
2218         }
2219
2220         hci_dev_lock(hdev);
2221         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2222             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2223                 hci_inquiry_cache_flush(hdev);
2224                 do_inquiry = 1;
2225         }
2226         hci_dev_unlock(hdev);
2227
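        /* Inquiry_Length is specified in units of 1.28 seconds; waiting
         * two seconds per unit leaves some headroom for the controller.
         */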
2228         timeo = ir.length * msecs_to_jiffies(2000);
2229
2230         if (do_inquiry) {
2231                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2232                                    timeo);
2233                 if (err < 0)
2234                         goto done;
2235
2236                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2237                  * cleared). If it is interrupted by a signal, return -EINTR.
2238                  */
2239                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2240                                 TASK_INTERRUPTIBLE))
2241                         return -EINTR;
2242         }
2243
2244         /* For an unlimited number of responses, use a buffer with
2245          * 255 entries.
2246          */
2247         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2248
2249         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2250          * buffer and copy it to user space afterwards.
2251          */
2252         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2253         if (!buf) {
2254                 err = -ENOMEM;
2255                 goto done;
2256         }
2257
2258         hci_dev_lock(hdev);
2259         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2260         hci_dev_unlock(hdev);
2261
2262         BT_DBG("num_rsp %d", ir.num_rsp);
2263
2264         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2265                 ptr += sizeof(ir);
2266                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2267                                  ir.num_rsp))
2268                         err = -EFAULT;
2269         } else
2270                 err = -EFAULT;
2271
2272         kfree(buf);
2273
2274 done:
2275         hci_dev_put(hdev);
2276         return err;
2277 }
2278
2279 static int hci_dev_do_open(struct hci_dev *hdev)
2280 {
2281         int ret = 0;
2282
2283         BT_DBG("%s %p", hdev->name, hdev);
2284
2285         hci_req_lock(hdev);
2286
2287         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2288                 ret = -ENODEV;
2289                 goto done;
2290         }
2291
2292         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2293             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2294                 /* Check for rfkill but allow the HCI setup stage to
2295                  * proceed (which in itself doesn't cause any RF activity).
2296                  */
2297                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2298                         ret = -ERFKILL;
2299                         goto done;
2300                 }
2301
2302                 /* Check for valid public address or a configured static
2303                  * random address, but let the HCI setup proceed to
2304                  * be able to determine if there is a public address
2305                  * or not.
2306                  *
2307                  * In case of user channel usage, it is not important
2308                  * if a public address or static random address is
2309                  * available.
2310                  *
2311                  * This check is only valid for BR/EDR controllers
2312                  * since AMP controllers do not have an address.
2313                  */
2314                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2315                     hdev->dev_type == HCI_BREDR &&
2316                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2317                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2318                         ret = -EADDRNOTAVAIL;
2319                         goto done;
2320                 }
2321         }
2322
2323         if (test_bit(HCI_UP, &hdev->flags)) {
2324                 ret = -EALREADY;
2325                 goto done;
2326         }
2327
2328         if (hdev->open(hdev)) {
2329                 ret = -EIO;
2330                 goto done;
2331         }
2332
2333         atomic_set(&hdev->cmd_cnt, 1);
2334         set_bit(HCI_INIT, &hdev->flags);
2335
2336         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2337                 if (hdev->setup)
2338                         ret = hdev->setup(hdev);
2339
2340                 /* The transport driver can set these quirks before
2341                  * creating the HCI device or in its setup callback.
2342                  *
2343                  * In case any of them is set, the controller has to
2344                  * start up as unconfigured.
2345                  */
2346                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2347                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2348                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2349
2350                 /* For an unconfigured controller it is required to
2351                  * read at least the version information provided by
2352                  * the Read Local Version Information command.
2353                  *
2354                  * If the set_bdaddr driver callback is provided, then
2355                  * also the original Bluetooth public device address
2356                  * will be read using the Read BD Address command.
2357                  */
2358                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2359                         ret = __hci_unconf_init(hdev);
2360         }
2361
2362         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2363                 /* If public address change is configured, ensure that
2364                  * the address gets programmed. If the driver does not
2365                  * support changing the public address, fail the power
2366                  * on procedure.
2367                  */
2368                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2369                     hdev->set_bdaddr)
2370                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2371                 else
2372                         ret = -EADDRNOTAVAIL;
2373         }
2374
2375         if (!ret) {
2376                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2377                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2378                         ret = __hci_init(hdev);
2379         }
2380
2381         clear_bit(HCI_INIT, &hdev->flags);
2382
2383         if (!ret) {
2384                 hci_dev_hold(hdev);
2385                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2386                 set_bit(HCI_UP, &hdev->flags);
2387                 hci_notify(hdev, HCI_DEV_UP);
2388                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2389                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2390                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2391                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2392                     hdev->dev_type == HCI_BREDR) {
2393                         hci_dev_lock(hdev);
2394                         mgmt_powered(hdev, 1);
2395                         hci_dev_unlock(hdev);
2396                 }
2397         } else {
2398                 /* Init failed, cleanup */
2399                 flush_work(&hdev->tx_work);
2400                 flush_work(&hdev->cmd_work);
2401                 flush_work(&hdev->rx_work);
2402
2403                 skb_queue_purge(&hdev->cmd_q);
2404                 skb_queue_purge(&hdev->rx_q);
2405
2406                 if (hdev->flush)
2407                         hdev->flush(hdev);
2408
2409                 if (hdev->sent_cmd) {
2410                         kfree_skb(hdev->sent_cmd);
2411                         hdev->sent_cmd = NULL;
2412                 }
2413
2414                 hdev->close(hdev);
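                /* Clear all flags except HCI_RAW */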
2415                 hdev->flags &= BIT(HCI_RAW);
2416         }
2417
2418 done:
2419         hci_req_unlock(hdev);
2420         return ret;
2421 }
2422
2423 /* ---- HCI ioctl helpers ---- */
2424
2425 int hci_dev_open(__u16 dev)
2426 {
2427         struct hci_dev *hdev;
2428         int err;
2429
2430         hdev = hci_dev_get(dev);
2431         if (!hdev)
2432                 return -ENODEV;
2433
2434         /* Devices that are marked as unconfigured can only be powered
2435          * up as user channel. Trying to bring them up as normal devices
2436          * will result in a failure. Only user channel operation is
2437          * possible.
2438          *
2439          * When this function is called for a user channel, the flag
2440          * HCI_USER_CHANNEL will be set first before attempting to
2441          * open the device.
2442          */
2443         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2444             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2445                 err = -EOPNOTSUPP;
2446                 goto done;
2447         }
2448
2449         /* We need to ensure that no other power on/off work is pending
2450          * before proceeding to call hci_dev_do_open. This is
2451          * particularly important if the setup procedure has not yet
2452          * completed.
2453          */
2454         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2455                 cancel_delayed_work(&hdev->power_off);
2456
2457         /* After this call it is guaranteed that the setup procedure
2458          * has finished. This means that error conditions like RFKILL
2459          * or no valid public or static random address apply.
2460          */
2461         flush_workqueue(hdev->req_workqueue);
2462
2463         /* For controllers that do not use the management interface and
2464          * are brought up via the legacy ioctl, set the HCI_PAIRABLE bit
2465          * so that pairing works for them. Once the management interface
2466          * is in use this bit will be cleared again and userspace has
2467          * to explicitly enable it.
2468          */
2469         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2470             !test_bit(HCI_MGMT, &hdev->dev_flags))
2471                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2472
2473         err = hci_dev_do_open(hdev);
2474
2475 done:
2476         hci_dev_put(hdev);
2477         return err;
2478 }
2479
2480 /* This function requires the caller holds hdev->lock */
2481 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2482 {
2483         struct hci_conn_params *p;
2484
2485         list_for_each_entry(p, &hdev->le_conn_params, list)
2486                 list_del_init(&p->action);
2487
2488         BT_DBG("All LE pending actions cleared");
2489 }
2490
2491 static int hci_dev_do_close(struct hci_dev *hdev)
2492 {
2493         BT_DBG("%s %p", hdev->name, hdev);
2494
2495         cancel_delayed_work(&hdev->power_off);
2496
2497         hci_req_cancel(hdev, ENODEV);
2498         hci_req_lock(hdev);
2499
2500         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2501                 cancel_delayed_work_sync(&hdev->cmd_timer);
2502                 hci_req_unlock(hdev);
2503                 return 0;
2504         }
2505
2506         /* Flush RX and TX works */
2507         flush_work(&hdev->tx_work);
2508         flush_work(&hdev->rx_work);
2509
2510         if (hdev->discov_timeout > 0) {
2511                 cancel_delayed_work(&hdev->discov_off);
2512                 hdev->discov_timeout = 0;
2513                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2514                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2515         }
2516
2517         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2518                 cancel_delayed_work(&hdev->service_cache);
2519
2520         cancel_delayed_work_sync(&hdev->le_scan_disable);
2521
2522         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2523                 cancel_delayed_work_sync(&hdev->rpa_expired);
2524
2525         hci_dev_lock(hdev);
2526         hci_inquiry_cache_flush(hdev);
2527         hci_conn_hash_flush(hdev);
2528         hci_pend_le_actions_clear(hdev);
2529         hci_dev_unlock(hdev);
2530
2531         hci_notify(hdev, HCI_DEV_DOWN);
2532
2533         if (hdev->flush)
2534                 hdev->flush(hdev);
2535
2536         /* Reset device */
2537         skb_queue_purge(&hdev->cmd_q);
2538         atomic_set(&hdev->cmd_cnt, 1);
2539         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2540             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2541             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2542                 set_bit(HCI_INIT, &hdev->flags);
2543                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2544                 clear_bit(HCI_INIT, &hdev->flags);
2545         }
2546
2547         /* Flush cmd work */
2548         flush_work(&hdev->cmd_work);
2549
2550         /* Drop queues */
2551         skb_queue_purge(&hdev->rx_q);
2552         skb_queue_purge(&hdev->cmd_q);
2553         skb_queue_purge(&hdev->raw_q);
2554
2555         /* Drop last sent command */
2556         if (hdev->sent_cmd) {
2557                 cancel_delayed_work_sync(&hdev->cmd_timer);
2558                 kfree_skb(hdev->sent_cmd);
2559                 hdev->sent_cmd = NULL;
2560         }
2561
2562         kfree_skb(hdev->recv_evt);
2563         hdev->recv_evt = NULL;
2564
2565         /* After this point our queues are empty
2566          * and no tasks are scheduled. */
2567         hdev->close(hdev);
2568
2569         /* Clear flags */
2570         hdev->flags &= BIT(HCI_RAW);
2571         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2572
2573         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2574                 if (hdev->dev_type == HCI_BREDR) {
2575                         hci_dev_lock(hdev);
2576                         mgmt_powered(hdev, 0);
2577                         hci_dev_unlock(hdev);
2578                 }
2579         }
2580
2581         /* Controller radio is available but is currently powered down */
2582         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2583
2584         memset(hdev->eir, 0, sizeof(hdev->eir));
2585         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2586         bacpy(&hdev->random_addr, BDADDR_ANY);
2587
2588         hci_req_unlock(hdev);
2589
2590         hci_dev_put(hdev);
2591         return 0;
2592 }
2593
2594 int hci_dev_close(__u16 dev)
2595 {
2596         struct hci_dev *hdev;
2597         int err;
2598
2599         hdev = hci_dev_get(dev);
2600         if (!hdev)
2601                 return -ENODEV;
2602
2603         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2604                 err = -EBUSY;
2605                 goto done;
2606         }
2607
2608         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2609                 cancel_delayed_work(&hdev->power_off);
2610
2611         err = hci_dev_do_close(hdev);
2612
2613 done:
2614         hci_dev_put(hdev);
2615         return err;
2616 }
2617
2618 int hci_dev_reset(__u16 dev)
2619 {
2620         struct hci_dev *hdev;
2621         int ret = 0;
2622
2623         hdev = hci_dev_get(dev);
2624         if (!hdev)
2625                 return -ENODEV;
2626
2627         hci_req_lock(hdev);
2628
2629         if (!test_bit(HCI_UP, &hdev->flags)) {
2630                 ret = -ENETDOWN;
2631                 goto done;
2632         }
2633
2634         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2635                 ret = -EBUSY;
2636                 goto done;
2637         }
2638
2639         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2640                 ret = -EOPNOTSUPP;
2641                 goto done;
2642         }
2643
2644         /* Drop queues */
2645         skb_queue_purge(&hdev->rx_q);
2646         skb_queue_purge(&hdev->cmd_q);
2647
2648         hci_dev_lock(hdev);
2649         hci_inquiry_cache_flush(hdev);
2650         hci_conn_hash_flush(hdev);
2651         hci_dev_unlock(hdev);
2652
2653         if (hdev->flush)
2654                 hdev->flush(hdev);
2655
2656         atomic_set(&hdev->cmd_cnt, 1);
2657         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2658
2659         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2660
2661 done:
2662         hci_req_unlock(hdev);
2663         hci_dev_put(hdev);
2664         return ret;
2665 }
2666
2667 int hci_dev_reset_stat(__u16 dev)
2668 {
2669         struct hci_dev *hdev;
2670         int ret = 0;
2671
2672         hdev = hci_dev_get(dev);
2673         if (!hdev)
2674                 return -ENODEV;
2675
2676         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2677                 ret = -EBUSY;
2678                 goto done;
2679         }
2680
2681         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2682                 ret = -EOPNOTSUPP;
2683                 goto done;
2684         }
2685
2686         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2687
2688 done:
2689         hci_dev_put(hdev);
2690         return ret;
2691 }
2692
2693 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2694 {
2695         bool conn_changed, discov_changed;
2696
2697         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2698
2699         if ((scan & SCAN_PAGE))
2700                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2701                                                  &hdev->dev_flags);
2702         else
2703                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2704                                                   &hdev->dev_flags);
2705
2706         if ((scan & SCAN_INQUIRY)) {
2707                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2708                                                    &hdev->dev_flags);
2709         } else {
2710                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2711                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2712                                                     &hdev->dev_flags);
2713         }
2714
2715         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2716                 return;
2717
2718         if (conn_changed || discov_changed) {
2719                 /* In case this was disabled through mgmt */
2720                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2721
2722                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2723                         mgmt_update_adv_data(hdev);
2724
2725                 mgmt_new_settings(hdev);
2726         }
2727 }
2728
2729 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2730 {
2731         struct hci_dev *hdev;
2732         struct hci_dev_req dr;
2733         int err = 0;
2734
2735         if (copy_from_user(&dr, arg, sizeof(dr)))
2736                 return -EFAULT;
2737
2738         hdev = hci_dev_get(dr.dev_id);
2739         if (!hdev)
2740                 return -ENODEV;
2741
2742         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2743                 err = -EBUSY;
2744                 goto done;
2745         }
2746
2747         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2748                 err = -EOPNOTSUPP;
2749                 goto done;
2750         }
2751
2752         if (hdev->dev_type != HCI_BREDR) {
2753                 err = -EOPNOTSUPP;
2754                 goto done;
2755         }
2756
2757         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2758                 err = -EOPNOTSUPP;
2759                 goto done;
2760         }
2761
2762         switch (cmd) {
2763         case HCISETAUTH:
2764                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2765                                    HCI_INIT_TIMEOUT);
2766                 break;
2767
2768         case HCISETENCRYPT:
2769                 if (!lmp_encrypt_capable(hdev)) {
2770                         err = -EOPNOTSUPP;
2771                         break;
2772                 }
2773
2774                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2775                         /* Auth must be enabled first */
2776                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2777                                            HCI_INIT_TIMEOUT);
2778                         if (err)
2779                                 break;
2780                 }
2781
2782                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2783                                    HCI_INIT_TIMEOUT);
2784                 break;
2785
2786         case HCISETSCAN:
2787                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2788                                    HCI_INIT_TIMEOUT);
2789
2790                 /* Ensure that the connectable and discoverable states
2791                  * get correctly modified as this was a non-mgmt change.
2792                  */
2793                 if (!err)
2794                         hci_update_scan_state(hdev, dr.dev_opt);
2795                 break;
2796
2797         case HCISETLINKPOL:
2798                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2799                                    HCI_INIT_TIMEOUT);
2800                 break;
2801
2802         case HCISETLINKMODE:
2803                 hdev->link_mode = ((__u16) dr.dev_opt) &
2804                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2805                 break;
2806
2807         case HCISETPTYPE:
2808                 hdev->pkt_type = (__u16) dr.dev_opt;
2809                 break;
2810
2811         case HCISETACLMTU:
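                /* dev_opt carries two 16-bit values, the packet count and
                 * the MTU, which are unpacked here via pointer arithmetic.
                 */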
2812                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2813                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2814                 break;
2815
2816         case HCISETSCOMTU:
2817                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2818                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2819                 break;
2820
2821         default:
2822                 err = -EINVAL;
2823                 break;
2824         }
2825
2826 done:
2827         hci_dev_put(hdev);
2828         return err;
2829 }
2830
2831 int hci_get_dev_list(void __user *arg)
2832 {
2833         struct hci_dev *hdev;
2834         struct hci_dev_list_req *dl;
2835         struct hci_dev_req *dr;
2836         int n = 0, size, err;
2837         __u16 dev_num;
2838
2839         if (get_user(dev_num, (__u16 __user *) arg))
2840                 return -EFAULT;
2841
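        /* Bound the request so that the list allocated below stays
         * within roughly two pages.
         */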
2842         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2843                 return -EINVAL;
2844
2845         size = sizeof(*dl) + dev_num * sizeof(*dr);
2846
2847         dl = kzalloc(size, GFP_KERNEL);
2848         if (!dl)
2849                 return -ENOMEM;
2850
2851         dr = dl->dev_req;
2852
2853         read_lock(&hci_dev_list_lock);
2854         list_for_each_entry(hdev, &hci_dev_list, list) {
2855                 unsigned long flags = hdev->flags;
2856
2857                 /* When auto-off is configured, the transport is
2858                  * running, but the device should still be reported
2859                  * as down.
2860                  */
2861                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2862                         flags &= ~BIT(HCI_UP);
2863
2864                 (dr + n)->dev_id  = hdev->id;
2865                 (dr + n)->dev_opt = flags;
2866
2867                 if (++n >= dev_num)
2868                         break;
2869         }
2870         read_unlock(&hci_dev_list_lock);
2871
2872         dl->dev_num = n;
2873         size = sizeof(*dl) + n * sizeof(*dr);
2874
2875         err = copy_to_user(arg, dl, size);
2876         kfree(dl);
2877
2878         return err ? -EFAULT : 0;
2879 }
2880
2881 int hci_get_dev_info(void __user *arg)
2882 {
2883         struct hci_dev *hdev;
2884         struct hci_dev_info di;
2885         unsigned long flags;
2886         int err = 0;
2887
2888         if (copy_from_user(&di, arg, sizeof(di)))
2889                 return -EFAULT;
2890
2891         hdev = hci_dev_get(di.dev_id);
2892         if (!hdev)
2893                 return -ENODEV;
2894
2895         /* When auto-off is configured, the transport is
2896          * running, but the device should still be reported
2897          * as down.
2898          */
2899         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2900                 flags = hdev->flags & ~BIT(HCI_UP);
2901         else
2902                 flags = hdev->flags;
2903
2904         strcpy(di.name, hdev->name);
2905         di.bdaddr   = hdev->bdaddr;
2906         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2907         di.flags    = flags;
2908         di.pkt_type = hdev->pkt_type;
2909         if (lmp_bredr_capable(hdev)) {
2910                 di.acl_mtu  = hdev->acl_mtu;
2911                 di.acl_pkts = hdev->acl_pkts;
2912                 di.sco_mtu  = hdev->sco_mtu;
2913                 di.sco_pkts = hdev->sco_pkts;
2914         } else {
2915                 di.acl_mtu  = hdev->le_mtu;
2916                 di.acl_pkts = hdev->le_pkts;
2917                 di.sco_mtu  = 0;
2918                 di.sco_pkts = 0;
2919         }
2920         di.link_policy = hdev->link_policy;
2921         di.link_mode   = hdev->link_mode;
2922
2923         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2924         memcpy(&di.features, &hdev->features, sizeof(di.features));
2925
2926         if (copy_to_user(arg, &di, sizeof(di)))
2927                 err = -EFAULT;
2928
2929         hci_dev_put(hdev);
2930
2931         return err;
2932 }
2933
2934 /* ---- Interface to HCI drivers ---- */
2935
2936 static int hci_rfkill_set_block(void *data, bool blocked)
2937 {
2938         struct hci_dev *hdev = data;
2939
2940         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2941
2942         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2943                 return -EBUSY;
2944
2945         if (blocked) {
2946                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2947                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2948                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2949                         hci_dev_do_close(hdev);
2950         } else {
2951                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2952         }
2953
2954         return 0;
2955 }
2956
2957 static const struct rfkill_ops hci_rfkill_ops = {
2958         .set_block = hci_rfkill_set_block,
2959 };
2960
2961 static void hci_power_on(struct work_struct *work)
2962 {
2963         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2964         int err;
2965
2966         BT_DBG("%s", hdev->name);
2967
2968         err = hci_dev_do_open(hdev);
2969         if (err < 0) {
2970                 mgmt_set_powered_failed(hdev, err);
2971                 return;
2972         }
2973
2974         /* During the HCI setup phase, a few error conditions are
2975          * ignored and they need to be checked now. If they are still
2976          * valid, it is important to turn the device back off.
2977          */
2978         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2979             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2980             (hdev->dev_type == HCI_BREDR &&
2981              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2982              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2983                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2984                 hci_dev_do_close(hdev);
2985         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2986                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2987                                    HCI_AUTO_OFF_TIMEOUT);
2988         }
2989
2990         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2991                 /* For unconfigured devices, set the HCI_RAW flag
2992                  * so that userspace can easily identify them.
2993                  */
2994                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2995                         set_bit(HCI_RAW, &hdev->flags);
2996
2997                 /* For fully configured devices, this will send
2998                  * the Index Added event. For unconfigured devices,
2999                  * it will send the Unconfigured Index Added event.
3000                  *
3001                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3002                  * and no event will be sent.
3003                  */
3004                 mgmt_index_added(hdev);
3005         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3006                 /* Once the controller is configured, it is important
3007                  * to clear the HCI_RAW flag.
3008                  */
3009                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3010                         clear_bit(HCI_RAW, &hdev->flags);
3011
3012                 /* Powering on the controller with HCI_CONFIG set only
3013                  * happens with the transition from unconfigured to
3014                  * configured. This will send the Index Added event.
3015                  */
3016                 mgmt_index_added(hdev);
3017         }
3018 }
3019
3020 static void hci_power_off(struct work_struct *work)
3021 {
3022         struct hci_dev *hdev = container_of(work, struct hci_dev,
3023                                             power_off.work);
3024
3025         BT_DBG("%s", hdev->name);
3026
3027         hci_dev_do_close(hdev);
3028 }
3029
3030 static void hci_discov_off(struct work_struct *work)
3031 {
3032         struct hci_dev *hdev;
3033
3034         hdev = container_of(work, struct hci_dev, discov_off.work);
3035
3036         BT_DBG("%s", hdev->name);
3037
3038         mgmt_discoverable_timeout(hdev);
3039 }
3040
3041 void hci_uuids_clear(struct hci_dev *hdev)
3042 {
3043         struct bt_uuid *uuid, *tmp;
3044
3045         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3046                 list_del(&uuid->list);
3047                 kfree(uuid);
3048         }
3049 }
3050
3051 void hci_link_keys_clear(struct hci_dev *hdev)
3052 {
3053         struct list_head *p, *n;
3054
3055         list_for_each_safe(p, n, &hdev->link_keys) {
3056                 struct link_key *key;
3057
3058                 key = list_entry(p, struct link_key, list);
3059
3060                 list_del(p);
3061                 kfree(key);
3062         }
3063 }
3064
3065 void hci_smp_ltks_clear(struct hci_dev *hdev)
3066 {
3067         struct smp_ltk *k, *tmp;
3068
3069         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3070                 list_del(&k->list);
3071                 kfree(k);
3072         }
3073 }
3074
3075 void hci_smp_irks_clear(struct hci_dev *hdev)
3076 {
3077         struct smp_irk *k, *tmp;
3078
3079         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3080                 list_del(&k->list);
3081                 kfree(k);
3082         }
3083 }
3084
3085 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3086 {
3087         struct link_key *k;
3088
3089         list_for_each_entry(k, &hdev->link_keys, list)
3090                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3091                         return k;
3092
3093         return NULL;
3094 }
3095
3096 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3097                                u8 key_type, u8 old_key_type)
3098 {
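        /* Authentication requirements per the core spec: 0x00/0x01 mean
         * no bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general
         * bonding; odd values additionally require MITM protection.
         */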
3099         /* Legacy key */
3100         if (key_type < 0x03)
3101                 return true;
3102
3103         /* Debug keys are insecure so don't store them persistently */
3104         if (key_type == HCI_LK_DEBUG_COMBINATION)
3105                 return false;
3106
3107         /* Changed combination key and there's no previous one */
3108         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3109                 return false;
3110
3111         /* Security mode 3 case */
3112         if (!conn)
3113                 return true;
3114
3115         /* Neither local nor remote side had no-bonding as a requirement */
3116         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3117                 return true;
3118
3119         /* Local side had dedicated bonding as a requirement */
3120         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3121                 return true;
3122
3123         /* Remote side had dedicated bonding as a requirement */
3124         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3125                 return true;
3126
3127         /* If none of the above criteria match, then don't store the key
3128          * persistently */
3129         return false;
3130 }
3131
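/* Map the stored LTK type to the connection role it is used in: an
 * SMP_LTK is used in the master role, everything else in the slave role.
 */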
3132 static u8 ltk_role(u8 type)
3133 {
3134         if (type == SMP_LTK)
3135                 return HCI_ROLE_MASTER;
3136
3137         return HCI_ROLE_SLAVE;
3138 }
3139
3140 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3141                              u8 role)
3142 {
3143         struct smp_ltk *k;
3144
3145         list_for_each_entry(k, &hdev->long_term_keys, list) {
3146                 if (k->ediv != ediv || k->rand != rand)
3147                         continue;
3148
3149                 if (ltk_role(k->type) != role)
3150                         continue;
3151
3152                 return k;
3153         }
3154
3155         return NULL;
3156 }
3157
3158 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3159                                      u8 addr_type, u8 role)
3160 {
3161         struct smp_ltk *k;
3162
3163         list_for_each_entry(k, &hdev->long_term_keys, list)
3164                 if (addr_type == k->bdaddr_type &&
3165                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3166                     ltk_role(k->type) == role)
3167                         return k;
3168
3169         return NULL;
3170 }
3171
3172 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3173 {
3174         struct smp_irk *irk;
3175
3176         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3177                 if (!bacmp(&irk->rpa, rpa))
3178                         return irk;
3179         }
3180
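        /* No cached match; try resolving the RPA against each IRK and
         * cache the resolved address on success.
         */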
3181         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3182                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3183                         bacpy(&irk->rpa, rpa);
3184                         return irk;
3185                 }
3186         }
3187
3188         return NULL;
3189 }
3190
3191 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3192                                      u8 addr_type)
3193 {
3194         struct smp_irk *irk;
3195
3196         /* Identity Address must be public or static random */
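        /* A static random address has its two most significant bits set */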
3197         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3198                 return NULL;
3199
3200         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3201                 if (addr_type == irk->addr_type &&
3202                     bacmp(bdaddr, &irk->bdaddr) == 0)
3203                         return irk;
3204         }
3205
3206         return NULL;
3207 }
3208
3209 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3210                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3211                                   u8 pin_len, bool *persistent)
3212 {
3213         struct link_key *key, *old_key;
3214         u8 old_key_type;
3215
3216         old_key = hci_find_link_key(hdev, bdaddr);
3217         if (old_key) {
3218                 old_key_type = old_key->type;
3219                 key = old_key;
3220         } else {
3221                 old_key_type = conn ? conn->key_type : 0xff;
3222                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3223                 if (!key)
3224                         return NULL;
3225                 list_add(&key->list, &hdev->link_keys);
3226         }
3227
3228         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3229
3230         /* Some buggy controller combinations generate a changed
3231          * combination key for legacy pairing even when there's no
3232          * previous key */
3233         if (type == HCI_LK_CHANGED_COMBINATION &&
3234             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3235                 type = HCI_LK_COMBINATION;
3236                 if (conn)
3237                         conn->key_type = type;
3238         }
3239
3240         bacpy(&key->bdaddr, bdaddr);
3241         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3242         key->pin_len = pin_len;
3243
3244         if (type == HCI_LK_CHANGED_COMBINATION)
3245                 key->type = old_key_type;
3246         else
3247                 key->type = type;
3248
3249         if (persistent)
3250                 *persistent = hci_persistent_key(hdev, conn, type,
3251                                                  old_key_type);
3252
3253         return key;
3254 }
3255
3256 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3257                             u8 addr_type, u8 type, u8 authenticated,
3258                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3259 {
3260         struct smp_ltk *key, *old_key;
3261         u8 role = ltk_role(type);
3262
3263         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3264         if (old_key)
3265                 key = old_key;
3266         else {
3267                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3268                 if (!key)
3269                         return NULL;
3270                 list_add(&key->list, &hdev->long_term_keys);
3271         }
3272
3273         bacpy(&key->bdaddr, bdaddr);
3274         key->bdaddr_type = addr_type;
3275         memcpy(key->val, tk, sizeof(key->val));
3276         key->authenticated = authenticated;
3277         key->ediv = ediv;
3278         key->rand = rand;
3279         key->enc_size = enc_size;
3280         key->type = type;
3281
3282         return key;
3283 }
3284
3285 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3286                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3287 {
3288         struct smp_irk *irk;
3289
3290         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3291         if (!irk) {
3292                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3293                 if (!irk)
3294                         return NULL;
3295
3296                 bacpy(&irk->bdaddr, bdaddr);
3297                 irk->addr_type = addr_type;
3298
3299                 list_add(&irk->list, &hdev->identity_resolving_keys);
3300         }
3301
3302         memcpy(irk->val, val, 16);
3303         bacpy(&irk->rpa, rpa);
3304
3305         return irk;
3306 }
3307
3308 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3309 {
3310         struct link_key *key;
3311
3312         key = hci_find_link_key(hdev, bdaddr);
3313         if (!key)
3314                 return -ENOENT;
3315
3316         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3317
3318         list_del(&key->list);
3319         kfree(key);
3320
3321         return 0;
3322 }
3323
3324 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3325 {
3326         struct smp_ltk *k, *tmp;
3327         int removed = 0;
3328
3329         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3330                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3331                         continue;
3332
3333                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3334
3335                 list_del(&k->list);
3336                 kfree(k);
3337                 removed++;
3338         }
3339
3340         return removed ? 0 : -ENOENT;
3341 }
3342
3343 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3344 {
3345         struct smp_irk *k, *tmp;
3346
3347         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3348                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3349                         continue;
3350
3351                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3352
3353                 list_del(&k->list);
3354                 kfree(k);
3355         }
3356 }
3357
3358 /* HCI command timer function */
3359 static void hci_cmd_timeout(struct work_struct *work)
3360 {
3361         struct hci_dev *hdev = container_of(work, struct hci_dev,
3362                                             cmd_timer.work);
3363
3364         if (hdev->sent_cmd) {
3365                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3366                 u16 opcode = __le16_to_cpu(sent->opcode);
3367
3368                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3369         } else {
3370                 BT_ERR("%s command tx timeout", hdev->name);
3371         }
3372
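        /* Assume the controller will not answer anymore; allow one new
         * command to be sent so the command queue does not stall.
         */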
3373         atomic_set(&hdev->cmd_cnt, 1);
3374         queue_work(hdev->workqueue, &hdev->cmd_work);
3375 }
3376
3377 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3378                                           bdaddr_t *bdaddr)
3379 {
3380         struct oob_data *data;
3381
3382         list_for_each_entry(data, &hdev->remote_oob_data, list)
3383                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3384                         return data;
3385
3386         return NULL;
3387 }
3388
3389 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3390 {
3391         struct oob_data *data;
3392
3393         data = hci_find_remote_oob_data(hdev, bdaddr);
3394         if (!data)
3395                 return -ENOENT;
3396
3397         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3398
3399         list_del(&data->list);
3400         kfree(data);
3401
3402         return 0;
3403 }
3404
3405 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3406 {
3407         struct oob_data *data, *n;
3408
3409         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3410                 list_del(&data->list);
3411                 kfree(data);
3412         }
3413 }
3414
3415 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3416                             u8 *hash, u8 *randomizer)
3417 {
3418         struct oob_data *data;
3419
3420         data = hci_find_remote_oob_data(hdev, bdaddr);
3421         if (!data) {
3422                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3423                 if (!data)
3424                         return -ENOMEM;
3425
3426                 bacpy(&data->bdaddr, bdaddr);
3427                 list_add(&data->list, &hdev->remote_oob_data);
3428         }
3429
3430         memcpy(data->hash192, hash, sizeof(data->hash192));
3431         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3432
3433         memset(data->hash256, 0, sizeof(data->hash256));
3434         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3435
3436         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3437
3438         return 0;
3439 }
3440
3441 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3442                                 u8 *hash192, u8 *randomizer192,
3443                                 u8 *hash256, u8 *randomizer256)
3444 {
3445         struct oob_data *data;
3446
3447         data = hci_find_remote_oob_data(hdev, bdaddr);
3448         if (!data) {
3449                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3450                 if (!data)
3451                         return -ENOMEM;
3452
3453                 bacpy(&data->bdaddr, bdaddr);
3454                 list_add(&data->list, &hdev->remote_oob_data);
3455         }
3456
3457         memcpy(data->hash192, hash192, sizeof(data->hash192));
3458         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3459
3460         memcpy(data->hash256, hash256, sizeof(data->hash256));
3461         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3462
3463         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3464
3465         return 0;
3466 }
3467
3468 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3469                                          bdaddr_t *bdaddr, u8 type)
3470 {
3471         struct bdaddr_list *b;
3472
3473         list_for_each_entry(b, bdaddr_list, list) {
3474                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3475                         return b;
3476         }
3477
3478         return NULL;
3479 }
3480
3481 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3482 {
3483         struct list_head *p, *n;
3484
3485         list_for_each_safe(p, n, bdaddr_list) {
3486                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3487
3488                 list_del(p);
3489                 kfree(b);
3490         }
3491 }
3492
3493 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3494 {
3495         struct bdaddr_list *entry;
3496
3497         if (!bacmp(bdaddr, BDADDR_ANY))
3498                 return -EBADF;
3499
3500         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3501                 return -EEXIST;
3502
3503         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3504         if (!entry)
3505                 return -ENOMEM;
3506
3507         bacpy(&entry->bdaddr, bdaddr);
3508         entry->bdaddr_type = type;
3509
3510         list_add(&entry->list, list);
3511
3512         return 0;
3513 }
3514
3515 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3516 {
3517         struct bdaddr_list *entry;
3518
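        /* BDADDR_ANY acts as a wildcard meaning "remove all entries" */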
3519         if (!bacmp(bdaddr, BDADDR_ANY)) {
3520                 hci_bdaddr_list_clear(list);
3521                 return 0;
3522         }
3523
3524         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3525         if (!entry)
3526                 return -ENOENT;
3527
3528         list_del(&entry->list);
3529         kfree(entry);
3530
3531         return 0;
3532 }
3533
3534 /* This function requires the caller holds hdev->lock */
3535 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3536                                                bdaddr_t *addr, u8 addr_type)
3537 {
3538         struct hci_conn_params *params;
3539
3540         /* The conn params list only contains identity addresses */
3541         if (!hci_is_identity_address(addr, addr_type))
3542                 return NULL;
3543
3544         list_for_each_entry(params, &hdev->le_conn_params, list) {
3545                 if (bacmp(&params->addr, addr) == 0 &&
3546                     params->addr_type == addr_type) {
3547                         return params;
3548                 }
3549         }
3550
3551         return NULL;
3552 }
3553
3554 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3555 {
3556         struct hci_conn *conn;
3557
3558         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3559         if (!conn)
3560                 return false;
3561
3562         if (conn->dst_type != type)
3563                 return false;
3564
3565         if (conn->state != BT_CONNECTED)
3566                 return false;
3567
3568         return true;
3569 }
3570
3571 /* This function requires the caller holds hdev->lock */
3572 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3573                                                   bdaddr_t *addr, u8 addr_type)
3574 {
3575         struct hci_conn_params *param;
3576
3577         /* The list only contains identity addresses */
3578         if (!hci_is_identity_address(addr, addr_type))
3579                 return NULL;
3580
3581         list_for_each_entry(param, list, action) {
3582                 if (bacmp(&param->addr, addr) == 0 &&
3583                     param->addr_type == addr_type)
3584                         return param;
3585         }
3586
3587         return NULL;
3588 }
3589
3590 /* This function requires the caller holds hdev->lock */
3591 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3592                                             bdaddr_t *addr, u8 addr_type)
3593 {
3594         struct hci_conn_params *params;
3595
3596         if (!hci_is_identity_address(addr, addr_type))
3597                 return NULL;
3598
3599         params = hci_conn_params_lookup(hdev, addr, addr_type);
3600         if (params)
3601                 return params;
3602
3603         params = kzalloc(sizeof(*params), GFP_KERNEL);
3604         if (!params) {
3605                 BT_ERR("Out of memory");
3606                 return NULL;
3607         }
3608
3609         bacpy(&params->addr, addr);
3610         params->addr_type = addr_type;
3611
3612         list_add(&params->list, &hdev->le_conn_params);
3613         INIT_LIST_HEAD(&params->action);
3614
3615         params->conn_min_interval = hdev->le_conn_min_interval;
3616         params->conn_max_interval = hdev->le_conn_max_interval;
3617         params->conn_latency = hdev->le_conn_latency;
3618         params->supervision_timeout = hdev->le_supv_timeout;
3619         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3620
3621         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3622
3623         return params;
3624 }
3625
3626 /* This function requires the caller holds hdev->lock */
3627 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3628                         u8 auto_connect)
3629 {
3630         struct hci_conn_params *params;
3631
3632         params = hci_conn_params_add(hdev, addr, addr_type);
3633         if (!params)
3634                 return -EIO;
3635
3636         if (params->auto_connect == auto_connect)
3637                 return 0;
3638
3639         list_del_init(&params->action);
3640
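        /* Re-file the entry on the pending list matching the new
         * auto-connect policy and let the background scan pick up the
         * change.
         */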
3641         switch (auto_connect) {
3642         case HCI_AUTO_CONN_DISABLED:
3643         case HCI_AUTO_CONN_LINK_LOSS:
3644                 hci_update_background_scan(hdev);
3645                 break;
3646         case HCI_AUTO_CONN_REPORT:
3647                 list_add(&params->action, &hdev->pend_le_reports);
3648                 hci_update_background_scan(hdev);
3649                 break;
3650         case HCI_AUTO_CONN_ALWAYS:
3651                 if (!is_connected(hdev, addr, addr_type)) {
3652                         list_add(&params->action, &hdev->pend_le_conns);
3653                         hci_update_background_scan(hdev);
3654                 }
3655                 break;
3656         }
3657
3658         params->auto_connect = auto_connect;
3659
3660         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3661                auto_connect);
3662
3663         return 0;
3664 }
3665
3666 /* This function requires the caller holds hdev->lock */
3667 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3668 {
3669         struct hci_conn_params *params;
3670
3671         params = hci_conn_params_lookup(hdev, addr, addr_type);
3672         if (!params)
3673                 return;
3674
3675         list_del(&params->action);
3676         list_del(&params->list);
3677         kfree(params);
3678
3679         hci_update_background_scan(hdev);
3680
3681         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3682 }
3683
3684 /* This function requires the caller holds hdev->lock */
3685 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3686 {
3687         struct hci_conn_params *params, *tmp;
3688
3689         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3690                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3691                         continue;
3692                 list_del(&params->list);
3693                 kfree(params);
3694         }
3695
3696         BT_DBG("All LE disabled connection parameters were removed");
3697 }
3698
3699 /* This function requires the caller holds hdev->lock */
3700 void hci_conn_params_clear_all(struct hci_dev *hdev)
3701 {
3702         struct hci_conn_params *params, *tmp;
3703
3704         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3705                 list_del(&params->action);
3706                 list_del(&params->list);
3707                 kfree(params);
3708         }
3709
3710         hci_update_background_scan(hdev);
3711
3712         BT_DBG("All LE connection parameters were removed");
3713 }
3714
3715 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3716 {
3717         if (status) {
3718                 BT_ERR("Failed to start inquiry: status %d", status);
3719
3720                 hci_dev_lock(hdev);
3721                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3722                 hci_dev_unlock(hdev);
3723                 return;
3724         }
3725 }
3726
3727 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3728 {
3729         /* General inquiry access code (GIAC) */
3730         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3731         struct hci_request req;
3732         struct hci_cp_inquiry cp;
3733         int err;
3734
3735         if (status) {
3736                 BT_ERR("Failed to disable LE scanning: status %d", status);
3737                 return;
3738         }
3739
3740         switch (hdev->discovery.type) {
3741         case DISCOV_TYPE_LE:
3742                 hci_dev_lock(hdev);
3743                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3744                 hci_dev_unlock(hdev);
3745                 break;
3746
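        /* For interleaved discovery the LE scan phase is followed by a
         * classic inquiry using the General Inquiry Access Code.
         */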
3747         case DISCOV_TYPE_INTERLEAVED:
3748                 hci_req_init(&req, hdev);
3749
3750                 memset(&cp, 0, sizeof(cp));
3751                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3752                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3753                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3754
3755                 hci_dev_lock(hdev);
3756
3757                 hci_inquiry_cache_flush(hdev);
3758
3759                 err = hci_req_run(&req, inquiry_complete);
3760                 if (err) {
3761                         BT_ERR("Inquiry request failed: err %d", err);
3762                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3763                 }
3764
3765                 hci_dev_unlock(hdev);
3766                 break;
3767         }
3768 }
3769
3770 static void le_scan_disable_work(struct work_struct *work)
3771 {
3772         struct hci_dev *hdev = container_of(work, struct hci_dev,
3773                                             le_scan_disable.work);
3774         struct hci_request req;
3775         int err;
3776
3777         BT_DBG("%s", hdev->name);
3778
3779         hci_req_init(&req, hdev);
3780
3781         hci_req_add_le_scan_disable(&req);
3782
3783         err = hci_req_run(&req, le_scan_disable_work_complete);
3784         if (err)
3785                 BT_ERR("Disable LE scanning request failed: err %d", err);
3786 }
3787
3788 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3789 {
3790         struct hci_dev *hdev = req->hdev;
3791
3792         /* If we're advertising or initiating an LE connection we can't
3793          * go ahead and change the random address at this time. This is
3794          * because the eventual initiator address used for the
3795          * subsequently created connection will be undefined (some
3796          * controllers use the new address and others the one we had
3797          * when the operation started).
3798          *
3799          * In this kind of scenario skip the update and let the random
3800          * address be updated at the next cycle.
3801          */
3802         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3803             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3804                 BT_DBG("Deferring random address update");
3805                 return;
3806         }
3807
3808         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3809 }
3810
3811 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3812                               u8 *own_addr_type)
3813 {
3814         struct hci_dev *hdev = req->hdev;
3815         int err;
3816
3817         /* If privacy is enabled, use a resolvable private address. If
3818          * the current RPA has expired or an address other than the
3819          * current RPA is in use, then generate a new one.
3820          */
3821         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3822                 int to;
3823
3824                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3825
3826                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3827                     !bacmp(&hdev->random_addr, &hdev->rpa))
3828                         return 0;
3829
3830                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3831                 if (err < 0) {
3832                         BT_ERR("%s failed to generate new RPA", hdev->name);
3833                         return err;
3834                 }
3835
3836                 set_random_addr(req, &hdev->rpa);
3837
3838                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3839                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3840
3841                 return 0;
3842         }
3843
3844         /* In case privacy is required but no resolvable private address
3845          * is available, use a non-resolvable private address. This is
3846          * useful for active scanning and non-connectable advertising.
3847          */
3848         if (require_privacy) {
3849                 bdaddr_t urpa;
3850
3851                 get_random_bytes(&urpa, 6);
3852                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3853
3854                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3855                 set_random_addr(req, &urpa);
3856                 return 0;
3857         }
3858
3859         /* If forcing the static address is in use or there is no public
3860          * address, use the static address as the random address (but
3861          * skip the HCI command if the current random address is already
3862          * the static one).
3863          */
3864         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3865             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3866                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3867                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3868                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3869                                     &hdev->static_addr);
3870                 return 0;
3871         }
3872
3873         /* Neither privacy nor static address is being used so use a
3874          * public address.
3875          */
3876         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3877
3878         return 0;
3879 }
3880
3881 /* Copy the Identity Address of the controller.
3882  *
3883  * If the controller has a public BD_ADDR, then by default use that one.
3884  * If this is an LE-only controller without a public address, default to
3885  * the static random address.
3886  *
3887  * For debugging purposes it is possible to force controllers with a
3888  * public address to use the static random address instead.
3889  */
3890 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3891                                u8 *bdaddr_type)
3892 {
3893         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3894             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3895                 bacpy(bdaddr, &hdev->static_addr);
3896                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3897         } else {
3898                 bacpy(bdaddr, &hdev->bdaddr);
3899                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3900         }
3901 }
3902
3903 /* Alloc HCI device */
3904 struct hci_dev *hci_alloc_dev(void)
3905 {
3906         struct hci_dev *hdev;
3907
3908         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3909         if (!hdev)
3910                 return NULL;
3911
3912         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3913         hdev->esco_type = (ESCO_HV1);
3914         hdev->link_mode = (HCI_LM_ACCEPT);
3915         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3916         hdev->io_capability = 0x03;     /* No Input No Output */
3917         hdev->manufacturer = 0xffff;    /* Default to internal use */
3918         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3919         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3920
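        /* Sniff intervals are in 0.625 ms slots (80 = 50 ms, 800 = 500 ms).
         * The LE scan values below use the same 0.625 ms unit, the LE
         * connection intervals are in 1.25 ms units and the supervision
         * timeout is in 10 ms units (0x002a = 420 ms).
         */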
3921         hdev->sniff_max_interval = 800;
3922         hdev->sniff_min_interval = 80;
3923
3924         hdev->le_adv_channel_map = 0x07;
3925         hdev->le_scan_interval = 0x0060;
3926         hdev->le_scan_window = 0x0030;
3927         hdev->le_conn_min_interval = 0x0028;
3928         hdev->le_conn_max_interval = 0x0038;
3929         hdev->le_conn_latency = 0x0000;
3930         hdev->le_supv_timeout = 0x002a;
3931
3932         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3933         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3934         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3935         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3936
3937         mutex_init(&hdev->lock);
3938         mutex_init(&hdev->req_lock);
3939
3940         INIT_LIST_HEAD(&hdev->mgmt_pending);
3941         INIT_LIST_HEAD(&hdev->blacklist);
3942         INIT_LIST_HEAD(&hdev->whitelist);
3943         INIT_LIST_HEAD(&hdev->uuids);
3944         INIT_LIST_HEAD(&hdev->link_keys);
3945         INIT_LIST_HEAD(&hdev->long_term_keys);
3946         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3947         INIT_LIST_HEAD(&hdev->remote_oob_data);
3948         INIT_LIST_HEAD(&hdev->le_white_list);
3949         INIT_LIST_HEAD(&hdev->le_conn_params);
3950         INIT_LIST_HEAD(&hdev->pend_le_conns);
3951         INIT_LIST_HEAD(&hdev->pend_le_reports);
3952         INIT_LIST_HEAD(&hdev->conn_hash.list);
3953
3954         INIT_WORK(&hdev->rx_work, hci_rx_work);
3955         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3956         INIT_WORK(&hdev->tx_work, hci_tx_work);
3957         INIT_WORK(&hdev->power_on, hci_power_on);
3958
3959         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3960         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3961         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3962
3963         skb_queue_head_init(&hdev->rx_q);
3964         skb_queue_head_init(&hdev->cmd_q);
3965         skb_queue_head_init(&hdev->raw_q);
3966
3967         init_waitqueue_head(&hdev->req_wait_q);
3968
3969         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3970
3971         hci_init_sysfs(hdev);
3972         discovery_init(hdev);
3973
3974         return hdev;
3975 }
3976 EXPORT_SYMBOL(hci_alloc_dev);
3977
3978 /* Free HCI device */
3979 void hci_free_dev(struct hci_dev *hdev)
3980 {
3981         /* will free via device release */
3982         put_device(&hdev->dev);
3983 }
3984 EXPORT_SYMBOL(hci_free_dev);
3985
3986 /* Register HCI device */
3987 int hci_register_dev(struct hci_dev *hdev)
3988 {
3989         int id, error;
3990
3991         if (!hdev->open || !hdev->close || !hdev->send)
3992                 return -EINVAL;
3993
3994         /* Do not allow HCI_AMP devices to register at index 0,
3995          * so the index can be used as the AMP controller ID.
3996          */
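        /* Note: the second argument to ida_simple_get() is the lowest
         * index that may be allocated; an end value of 0 means there is
         * no upper bound.
         */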
3997         switch (hdev->dev_type) {
3998         case HCI_BREDR:
3999                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4000                 break;
4001         case HCI_AMP:
4002                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4003                 break;
4004         default:
4005                 return -EINVAL;
4006         }
4007
4008         if (id < 0)
4009                 return id;
4010
4011         sprintf(hdev->name, "hci%d", id);
4012         hdev->id = id;
4013
4014         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4015
4016         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4017                                           WQ_MEM_RECLAIM, 1, hdev->name);
4018         if (!hdev->workqueue) {
4019                 error = -ENOMEM;
4020                 goto err;
4021         }
4022
4023         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4024                                               WQ_MEM_RECLAIM, 1, hdev->name);
4025         if (!hdev->req_workqueue) {
4026                 destroy_workqueue(hdev->workqueue);
4027                 error = -ENOMEM;
4028                 goto err;
4029         }
4030
4031         if (!IS_ERR_OR_NULL(bt_debugfs))
4032                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4033
4034         dev_set_name(&hdev->dev, "%s", hdev->name);
4035
4036         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4037                                                CRYPTO_ALG_ASYNC);
4038         if (IS_ERR(hdev->tfm_aes)) {
4039                 BT_ERR("Unable to create crypto context");
4040                 error = PTR_ERR(hdev->tfm_aes);
4041                 hdev->tfm_aes = NULL;
4042                 goto err_wqueue;
4043         }
4044
4045         error = device_add(&hdev->dev);
4046         if (error < 0)
4047                 goto err_tfm;
4048
4049         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4050                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4051                                     hdev);
4052         if (hdev->rfkill) {
4053                 if (rfkill_register(hdev->rfkill) < 0) {
4054                         rfkill_destroy(hdev->rfkill);
4055                         hdev->rfkill = NULL;
4056                 }
4057         }
4058
4059         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4060                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4061
4062         set_bit(HCI_SETUP, &hdev->dev_flags);
4063         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4064
4065         if (hdev->dev_type == HCI_BREDR) {
4066                 /* Assume BR/EDR support until proven otherwise (such as
4067                  * through reading supported features during init).
4068                  */
4069                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4070         }
4071
4072         write_lock(&hci_dev_list_lock);
4073         list_add(&hdev->list, &hci_dev_list);
4074         write_unlock(&hci_dev_list_lock);
4075
4076         /* Devices that are marked for raw-only usage are unconfigured
4077          * and should not be included in normal operation.
4078          */
4079         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4080                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4081
4082         hci_notify(hdev, HCI_DEV_REG);
4083         hci_dev_hold(hdev);
4084
4085         queue_work(hdev->req_workqueue, &hdev->power_on);
4086
4087         return id;
4088
4089 err_tfm:
4090         crypto_free_blkcipher(hdev->tfm_aes);
4091 err_wqueue:
4092         destroy_workqueue(hdev->workqueue);
4093         destroy_workqueue(hdev->req_workqueue);
4094 err:
4095         ida_simple_remove(&hci_index_ida, hdev->id);
4096
4097         return error;
4098 }
4099 EXPORT_SYMBOL(hci_register_dev);
4100
4101 /* Unregister HCI device */
4102 void hci_unregister_dev(struct hci_dev *hdev)
4103 {
4104         int i, id;
4105
4106         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4107
4108         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4109
4110         id = hdev->id;
4111
4112         write_lock(&hci_dev_list_lock);
4113         list_del(&hdev->list);
4114         write_unlock(&hci_dev_list_lock);
4115
4116         hci_dev_do_close(hdev);
4117
4118         for (i = 0; i < NUM_REASSEMBLY; i++)
4119                 kfree_skb(hdev->reassembly[i]);
4120
4121         cancel_work_sync(&hdev->power_on);
4122
4123         if (!test_bit(HCI_INIT, &hdev->flags) &&
4124             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4125             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4126                 hci_dev_lock(hdev);
4127                 mgmt_index_removed(hdev);
4128                 hci_dev_unlock(hdev);
4129         }
4130
4131         /* mgmt_index_removed should take care of emptying the
4132          * pending list */
4133         BUG_ON(!list_empty(&hdev->mgmt_pending));
4134
4135         hci_notify(hdev, HCI_DEV_UNREG);
4136
4137         if (hdev->rfkill) {
4138                 rfkill_unregister(hdev->rfkill);
4139                 rfkill_destroy(hdev->rfkill);
4140         }
4141
4142         if (hdev->tfm_aes)
4143                 crypto_free_blkcipher(hdev->tfm_aes);
4144
4145         device_del(&hdev->dev);
4146
4147         debugfs_remove_recursive(hdev->debugfs);
4148
4149         destroy_workqueue(hdev->workqueue);
4150         destroy_workqueue(hdev->req_workqueue);
4151
4152         hci_dev_lock(hdev);
4153         hci_bdaddr_list_clear(&hdev->blacklist);
4154         hci_bdaddr_list_clear(&hdev->whitelist);
4155         hci_uuids_clear(hdev);
4156         hci_link_keys_clear(hdev);
4157         hci_smp_ltks_clear(hdev);
4158         hci_smp_irks_clear(hdev);
4159         hci_remote_oob_data_clear(hdev);
4160         hci_bdaddr_list_clear(&hdev->le_white_list);
4161         hci_conn_params_clear_all(hdev);
4162         hci_dev_unlock(hdev);
4163
4164         hci_dev_put(hdev);
4165
4166         ida_simple_remove(&hci_index_ida, id);
4167 }
4168 EXPORT_SYMBOL(hci_unregister_dev);
4169
4170 /* Suspend HCI device */
4171 int hci_suspend_dev(struct hci_dev *hdev)
4172 {
4173         hci_notify(hdev, HCI_DEV_SUSPEND);
4174         return 0;
4175 }
4176 EXPORT_SYMBOL(hci_suspend_dev);
4177
4178 /* Resume HCI device */
4179 int hci_resume_dev(struct hci_dev *hdev)
4180 {
4181         hci_notify(hdev, HCI_DEV_RESUME);
4182         return 0;
4183 }
4184 EXPORT_SYMBOL(hci_resume_dev);
4185
4186 /* Receive frame from HCI drivers */
4187 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4188 {
4189         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4190                       && !test_bit(HCI_INIT, &hdev->flags))) {
4191                 kfree_skb(skb);
4192                 return -ENXIO;
4193         }
4194
4195         /* Incoming skb */
4196         bt_cb(skb)->incoming = 1;
4197
4198         /* Time stamp */
4199         __net_timestamp(skb);
4200
4201         skb_queue_tail(&hdev->rx_q, skb);
4202         queue_work(hdev->workqueue, &hdev->rx_work);
4203
4204         return 0;
4205 }
4206 EXPORT_SYMBOL(hci_recv_frame);
4207
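/* Reassemble one HCI packet of the given type from a byte stream.
 * @index selects the per-type reassembly buffer. Returns the number of
 * bytes in @data that belong to the next packet (i.e. were not
 * consumed), or a negative error.
 */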
4208 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4209                           int count, __u8 index)
4210 {
4211         int len = 0;
4212         int hlen = 0;
4213         int remain = count;
4214         struct sk_buff *skb;
4215         struct bt_skb_cb *scb;
4216
4217         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4218             index >= NUM_REASSEMBLY)
4219                 return -EILSEQ;
4220
4221         skb = hdev->reassembly[index];
4222
4223         if (!skb) {
4224                 switch (type) {
4225                 case HCI_ACLDATA_PKT:
4226                         len = HCI_MAX_FRAME_SIZE;
4227                         hlen = HCI_ACL_HDR_SIZE;
4228                         break;
4229                 case HCI_EVENT_PKT:
4230                         len = HCI_MAX_EVENT_SIZE;
4231                         hlen = HCI_EVENT_HDR_SIZE;
4232                         break;
4233                 case HCI_SCODATA_PKT:
4234                         len = HCI_MAX_SCO_SIZE;
4235                         hlen = HCI_SCO_HDR_SIZE;
4236                         break;
4237                 }
4238
4239                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4240                 if (!skb)
4241                         return -ENOMEM;
4242
4243                 scb = (void *) skb->cb;
4244                 scb->expect = hlen;
4245                 scb->pkt_type = type;
4246
4247                 hdev->reassembly[index] = skb;
4248         }
4249
4250         while (count) {
4251                 scb = (void *) skb->cb;
4252                 len = min_t(uint, scb->expect, count);
4253
4254                 memcpy(skb_put(skb, len), data, len);
4255
4256                 count -= len;
4257                 data += len;
4258                 scb->expect -= len;
4259                 remain = count;
4260
4261                 switch (type) {
4262                 case HCI_EVENT_PKT:
4263                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4264                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4265                                 scb->expect = h->plen;
4266
4267                                 if (skb_tailroom(skb) < scb->expect) {
4268                                         kfree_skb(skb);
4269                                         hdev->reassembly[index] = NULL;
4270                                         return -ENOMEM;
4271                                 }
4272                         }
4273                         break;
4274
4275                 case HCI_ACLDATA_PKT:
4276                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4277                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4278                                 scb->expect = __le16_to_cpu(h->dlen);
4279
4280                                 if (skb_tailroom(skb) < scb->expect) {
4281                                         kfree_skb(skb);
4282                                         hdev->reassembly[index] = NULL;
4283                                         return -ENOMEM;
4284                                 }
4285                         }
4286                         break;
4287
4288                 case HCI_SCODATA_PKT:
4289                         if (skb->len == HCI_SCO_HDR_SIZE) {
4290                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4291                                 scb->expect = h->dlen;
4292
4293                                 if (skb_tailroom(skb) < scb->expect) {
4294                                         kfree_skb(skb);
4295                                         hdev->reassembly[index] = NULL;
4296                                         return -ENOMEM;
4297                                 }
4298                         }
4299                         break;
4300                 }
4301
4302                 if (scb->expect == 0) {
4303                         /* Complete frame */
4304
4305                         bt_cb(skb)->pkt_type = type;
4306                         hci_recv_frame(hdev, skb);
4307
4308                         hdev->reassembly[index] = NULL;
4309                         return remain;
4310                 }
4311         }
4312
4313         return remain;
4314 }
4315
4316 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4317 {
4318         int rem = 0;
4319
4320         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4321                 return -EILSEQ;
4322
4323         while (count) {
4324                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4325                 if (rem < 0)
4326                         return rem;
4327
4328                 data += (count - rem);
4329                 count = rem;
4330         }
4331
4332         return rem;
4333 }
4334 EXPORT_SYMBOL(hci_recv_fragment);
4335
4336 #define STREAM_REASSEMBLY 0
4337
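/* Reassemble packets from a raw byte stream in which every packet is
 * prefixed with a single packet-type byte, as in the UART H:4
 * transport.
 */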
4338 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4339 {
4340         int type;
4341         int rem = 0;
4342
4343         while (count) {
4344                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4345
4346                 if (!skb) {
4347                         struct { char type; } *pkt;
4348
4349                         /* Start of the frame */
4350                         pkt = data;
4351                         type = pkt->type;
4352
4353                         data++;
4354                         count--;
4355                 } else
4356                         type = bt_cb(skb)->pkt_type;
4357
4358                 rem = hci_reassembly(hdev, type, data, count,
4359                                      STREAM_REASSEMBLY);
4360                 if (rem < 0)
4361                         return rem;
4362
4363                 data += (count - rem);
4364                 count = rem;
4365         }
4366
4367         return rem;
4368 }
4369 EXPORT_SYMBOL(hci_recv_stream_fragment);
4370
4371 /* ---- Interface to upper protocols ---- */
4372
4373 int hci_register_cb(struct hci_cb *cb)
4374 {
4375         BT_DBG("%p name %s", cb, cb->name);
4376
4377         write_lock(&hci_cb_list_lock);
4378         list_add(&cb->list, &hci_cb_list);
4379         write_unlock(&hci_cb_list_lock);
4380
4381         return 0;
4382 }
4383 EXPORT_SYMBOL(hci_register_cb);
4384
4385 int hci_unregister_cb(struct hci_cb *cb)
4386 {
4387         BT_DBG("%p name %s", cb, cb->name);
4388
4389         write_lock(&hci_cb_list_lock);
4390         list_del(&cb->list);
4391         write_unlock(&hci_cb_list_lock);
4392
4393         return 0;
4394 }
4395 EXPORT_SYMBOL(hci_unregister_cb);
4396
4397 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4398 {
4399         int err;
4400
4401         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4402
4403         /* Time stamp */
4404         __net_timestamp(skb);
4405
4406         /* Send copy to monitor */
4407         hci_send_to_monitor(hdev, skb);
4408
4409         if (atomic_read(&hdev->promisc)) {
4410                 /* Send copy to the sockets */
4411                 hci_send_to_sock(hdev, skb);
4412         }
4413
4414         /* Get rid of skb owner, prior to sending to the driver. */
4415         skb_orphan(skb);
4416
4417         err = hdev->send(hdev, skb);
4418         if (err < 0) {
4419                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4420                 kfree_skb(skb);
4421         }
4422 }
4423
4424 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4425 {
4426         skb_queue_head_init(&req->cmd_q);
4427         req->hdev = hdev;
4428         req->err = 0;
4429 }
4430
4431 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4432 {
4433         struct hci_dev *hdev = req->hdev;
4434         struct sk_buff *skb;
4435         unsigned long flags;
4436
4437         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4438
4439         /* If an error occurred during request building, remove all HCI
4440          * commands queued on the HCI request queue.
4441          */
4442         if (req->err) {
4443                 skb_queue_purge(&req->cmd_q);
4444                 return req->err;
4445         }
4446
4447         /* Do not allow empty requests */
4448         if (skb_queue_empty(&req->cmd_q))
4449                 return -ENODATA;
4450
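        /* The completion callback is attached to the last command of the
         * request; it runs once the controller has answered everything
         * that was queued.
         */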
4451         skb = skb_peek_tail(&req->cmd_q);
4452         bt_cb(skb)->req.complete = complete;
4453
4454         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4455         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4456         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4457
4458         queue_work(hdev->workqueue, &hdev->cmd_work);
4459
4460         return 0;
4461 }
4462
4463 bool hci_req_pending(struct hci_dev *hdev)
4464 {
4465         return (hdev->req_status == HCI_REQ_PEND);
4466 }
4467
4468 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4469                                        u32 plen, const void *param)
4470 {
4471         int len = HCI_COMMAND_HDR_SIZE + plen;
4472         struct hci_command_hdr *hdr;
4473         struct sk_buff *skb;
4474
4475         skb = bt_skb_alloc(len, GFP_ATOMIC);
4476         if (!skb)
4477                 return NULL;
4478
4479         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4480         hdr->opcode = cpu_to_le16(opcode);
4481         hdr->plen   = plen;
4482
4483         if (plen)
4484                 memcpy(skb_put(skb, plen), param, plen);
4485
4486         BT_DBG("skb len %d", skb->len);
4487
4488         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4489
4490         return skb;
4491 }
4492
4493 /* Send HCI command */
4494 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4495                  const void *param)
4496 {
4497         struct sk_buff *skb;
4498
4499         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4500
4501         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4502         if (!skb) {
4503                 BT_ERR("%s no memory for command", hdev->name);
4504                 return -ENOMEM;
4505         }
4506
4507         /* Stand-alone HCI commands must be flagged as
4508          * single-command requests.
4509          */
4510         bt_cb(skb)->req.start = true;
4511
4512         skb_queue_tail(&hdev->cmd_q, skb);
4513         queue_work(hdev->workqueue, &hdev->cmd_work);
4514
4515         return 0;
4516 }
4517
4518 /* Queue a command to an asynchronous HCI request */
4519 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4520                     const void *param, u8 event)
4521 {
4522         struct hci_dev *hdev = req->hdev;
4523         struct sk_buff *skb;
4524
4525         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4526
4527         /* If an error occurred during request building, there is no point in
4528          * queueing the HCI command. We can simply return.
4529          */
4530         if (req->err)
4531                 return;
4532
4533         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4534         if (!skb) {
4535                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4536                        hdev->name, opcode);
4537                 req->err = -ENOMEM;
4538                 return;
4539         }
4540
4541         if (skb_queue_empty(&req->cmd_q))
4542                 bt_cb(skb)->req.start = true;
4543
4544         bt_cb(skb)->req.event = event;
4545
4546         skb_queue_tail(&req->cmd_q, skb);
4547 }
4548
4549 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4550                  const void *param)
4551 {
4552         hci_req_add_ev(req, opcode, plen, param, 0);
4553 }
4554
4555 /* Get data from the previously sent command */
4556 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4557 {
4558         struct hci_command_hdr *hdr;
4559
4560         if (!hdev->sent_cmd)
4561                 return NULL;
4562
4563         hdr = (void *) hdev->sent_cmd->data;
4564
4565         if (hdr->opcode != cpu_to_le16(opcode))
4566                 return NULL;
4567
4568         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4569
4570         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4571 }
4572
4573 /* Send ACL data */
4574 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4575 {
4576         struct hci_acl_hdr *hdr;
4577         int len = skb->len;
4578
4579         skb_push(skb, HCI_ACL_HDR_SIZE);
4580         skb_reset_transport_header(skb);
4581         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4582         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4583         hdr->dlen   = cpu_to_le16(len);
4584 }
4585
4586 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4587                           struct sk_buff *skb, __u16 flags)
4588 {
4589         struct hci_conn *conn = chan->conn;
4590         struct hci_dev *hdev = conn->hdev;
4591         struct sk_buff *list;
4592
4593         skb->len = skb_headlen(skb);
4594         skb->data_len = 0;
4595
4596         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4597
4598         switch (hdev->dev_type) {
4599         case HCI_BREDR:
4600                 hci_add_acl_hdr(skb, conn->handle, flags);
4601                 break;
4602         case HCI_AMP:
4603                 hci_add_acl_hdr(skb, chan->handle, flags);
4604                 break;
4605         default:
4606                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4607                 return;
4608         }
4609
4610         list = skb_shinfo(skb)->frag_list;
4611         if (!list) {
4612                 /* Non-fragmented */
4613                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4614
4615                 skb_queue_tail(queue, skb);
4616         } else {
4617                 /* Fragmented */
4618                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4619
4620                 skb_shinfo(skb)->frag_list = NULL;
4621
4622                 /* Queue all fragments atomically */
4623                 spin_lock(&queue->lock);
4624
4625                 __skb_queue_tail(queue, skb);
4626
4627                 flags &= ~ACL_START;
4628                 flags |= ACL_CONT;
4629                 do {
4630                         skb = list; list = list->next;
4631
4632                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4633                         hci_add_acl_hdr(skb, conn->handle, flags);
4634
4635                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4636
4637                         __skb_queue_tail(queue, skb);
4638                 } while (list);
4639
4640                 spin_unlock(&queue->lock);
4641         }
4642 }
4643
4644 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4645 {
4646         struct hci_dev *hdev = chan->conn->hdev;
4647
4648         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4649
4650         hci_queue_acl(chan, &chan->data_q, skb, flags);
4651
4652         queue_work(hdev->workqueue, &hdev->tx_work);
4653 }
4654
4655 /* Send SCO data */
4656 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4657 {
4658         struct hci_dev *hdev = conn->hdev;
4659         struct hci_sco_hdr hdr;
4660
4661         BT_DBG("%s len %d", hdev->name, skb->len);
4662
4663         hdr.handle = cpu_to_le16(conn->handle);
4664         hdr.dlen   = skb->len;
4665
4666         skb_push(skb, HCI_SCO_HDR_SIZE);
4667         skb_reset_transport_header(skb);
4668         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4669
4670         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4671
4672         skb_queue_tail(&conn->data_q, skb);
4673         queue_work(hdev->workqueue, &hdev->tx_work);
4674 }
4675
4676 /* ---- HCI TX task (outgoing data) ---- */
4677
4678 /* HCI Connection scheduler */
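/* Pick the connection of the given link type that has the least data
 * outstanding and give it a quota that is an equal share of the free
 * controller buffers (but always at least one packet).
 */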
4679 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4680                                      int *quote)
4681 {
4682         struct hci_conn_hash *h = &hdev->conn_hash;
4683         struct hci_conn *conn = NULL, *c;
4684         unsigned int num = 0, min = ~0;
4685
4686         /* We don't have to lock the device here. Connections are
4687          * always added and removed with the TX task disabled. */
4688
4689         rcu_read_lock();
4690
4691         list_for_each_entry_rcu(c, &h->list, list) {
4692                 if (c->type != type || skb_queue_empty(&c->data_q))
4693                         continue;
4694
4695                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4696                         continue;
4697
4698                 num++;
4699
4700                 if (c->sent < min) {
4701                         min  = c->sent;
4702                         conn = c;
4703                 }
4704
4705                 if (hci_conn_num(hdev, type) == num)
4706                         break;
4707         }
4708
4709         rcu_read_unlock();
4710
4711         if (conn) {
4712                 int cnt, q;
4713
4714                 switch (conn->type) {
4715                 case ACL_LINK:
4716                         cnt = hdev->acl_cnt;
4717                         break;
4718                 case SCO_LINK:
4719                 case ESCO_LINK:
4720                         cnt = hdev->sco_cnt;
4721                         break;
4722                 case LE_LINK:
4723                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4724                         break;
4725                 default:
4726                         cnt = 0;
4727                         BT_ERR("Unknown link type");
4728                 }
4729
4730                 q = cnt / num;
4731                 *quote = q ? q : 1;
4732         } else
4733                 *quote = 0;
4734
4735         BT_DBG("conn %p quote %d", conn, *quote);
4736         return conn;
4737 }
4738
4739 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4740 {
4741         struct hci_conn_hash *h = &hdev->conn_hash;
4742         struct hci_conn *c;
4743
4744         BT_ERR("%s link tx timeout", hdev->name);
4745
4746         rcu_read_lock();
4747
4748         /* Kill stalled connections */
4749         list_for_each_entry_rcu(c, &h->list, list) {
4750                 if (c->type == type && c->sent) {
4751                         BT_ERR("%s killing stalled connection %pMR",
4752                                hdev->name, &c->dst);
4753                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4754                 }
4755         }
4756
4757         rcu_read_unlock();
4758 }
4759
4760 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4761                                       int *quote)
4762 {
4763         struct hci_conn_hash *h = &hdev->conn_hash;
4764         struct hci_chan *chan = NULL;
4765         unsigned int num = 0, min = ~0, cur_prio = 0;
4766         struct hci_conn *conn;
4767         int cnt, q, conn_num = 0;
4768
4769         BT_DBG("%s", hdev->name);
4770
4771         rcu_read_lock();
4772
4773         list_for_each_entry_rcu(conn, &h->list, list) {
4774                 struct hci_chan *tmp;
4775
4776                 if (conn->type != type)
4777                         continue;
4778
4779                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4780                         continue;
4781
4782                 conn_num++;
4783
4784                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4785                         struct sk_buff *skb;
4786
4787                         if (skb_queue_empty(&tmp->data_q))
4788                                 continue;
4789
4790                         skb = skb_peek(&tmp->data_q);
4791                         if (skb->priority < cur_prio)
4792                                 continue;
4793
4794                         if (skb->priority > cur_prio) {
4795                                 num = 0;
4796                                 min = ~0;
4797                                 cur_prio = skb->priority;
4798                         }
4799
4800                         num++;
4801
4802                         if (conn->sent < min) {
4803                                 min  = conn->sent;
4804                                 chan = tmp;
4805                         }
4806                 }
4807
4808                 if (hci_conn_num(hdev, type) == conn_num)
4809                         break;
4810         }
4811
4812         rcu_read_unlock();
4813
4814         if (!chan)
4815                 return NULL;
4816
4817         switch (chan->conn->type) {
4818         case ACL_LINK:
4819                 cnt = hdev->acl_cnt;
4820                 break;
4821         case AMP_LINK:
4822                 cnt = hdev->block_cnt;
4823                 break;
4824         case SCO_LINK:
4825         case ESCO_LINK:
4826                 cnt = hdev->sco_cnt;
4827                 break;
4828         case LE_LINK:
4829                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4830                 break;
4831         default:
4832                 cnt = 0;
4833                 BT_ERR("Unknown link type");
4834         }
4835
4836         q = cnt / num;
4837         *quote = q ? q : 1;
4838         BT_DBG("chan %p quote %d", chan, *quote);
4839         return chan;
4840 }
4841
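/* Avoid starvation: channels that got to send in the last round have
 * their counters reset, while the head packet of every channel that
 * did not is promoted to just below the maximum priority.
 */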
4842 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4843 {
4844         struct hci_conn_hash *h = &hdev->conn_hash;
4845         struct hci_conn *conn;
4846         int num = 0;
4847
4848         BT_DBG("%s", hdev->name);
4849
4850         rcu_read_lock();
4851
4852         list_for_each_entry_rcu(conn, &h->list, list) {
4853                 struct hci_chan *chan;
4854
4855                 if (conn->type != type)
4856                         continue;
4857
4858                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4859                         continue;
4860
4861                 num++;
4862
4863                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4864                         struct sk_buff *skb;
4865
4866                         if (chan->sent) {
4867                                 chan->sent = 0;
4868                                 continue;
4869                         }
4870
4871                         if (skb_queue_empty(&chan->data_q))
4872                                 continue;
4873
4874                         skb = skb_peek(&chan->data_q);
4875                         if (skb->priority >= HCI_PRIO_MAX - 1)
4876                                 continue;
4877
4878                         skb->priority = HCI_PRIO_MAX - 1;
4879
4880                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4881                                skb->priority);
4882                 }
4883
4884                 if (hci_conn_num(hdev, type) == num)
4885                         break;
4886         }
4887
4888         rcu_read_unlock();
4889
4890 }
4891
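/* Number of controller buffer blocks consumed by one ACL frame; the
 * ACL header (HCI_ACL_HDR_SIZE, 4 octets) is not charged against the
 * payload.  For example, with a hypothetical block_len of 339 octets a
 * 1021-byte frame costs DIV_ROUND_UP(1017, 339) = 3 blocks.
 */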
4892 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4893 {
4894         /* Calculate count of blocks used by this packet */
4895         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4896 }
4897
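/* If the controller has returned no ACL buffers (cnt == 0) for longer
 * than HCI_ACL_TX_TIMEOUT, treat the link as stalled and tear down the
 * offending connections via hci_link_tx_to().  The check is skipped
 * while the controller is still unconfigured.
 */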
4898 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4899 {
4900         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4901                 /* ACL tx timeout must be longer than maximum
4902                  * link supervision timeout (40.9 seconds) */
4903                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4904                                        HCI_ACL_TX_TIMEOUT))
4905                         hci_link_tx_to(hdev, ACL_LINK);
4906         }
4907 }
4908
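/* Packet-based ACL scheduler: repeatedly ask hci_chan_sent() for the
 * best channel and drain up to its quote of frames, stopping early if
 * a lower-priority skb reaches the head of the queue.  If anything was
 * sent, recalculate priorities so starved channels catch up.
 */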
4909 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4910 {
4911         unsigned int cnt = hdev->acl_cnt;
4912         struct hci_chan *chan;
4913         struct sk_buff *skb;
4914         int quote;
4915
4916         __check_timeout(hdev, cnt);
4917
4918         while (hdev->acl_cnt &&
4919                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4920                 u32 priority = (skb_peek(&chan->data_q))->priority;
4921                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4922                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4923                                skb->len, skb->priority);
4924
4925                         /* Stop if priority has changed */
4926                         if (skb->priority < priority)
4927                                 break;
4928
4929                         skb = skb_dequeue(&chan->data_q);
4930
4931                         hci_conn_enter_active_mode(chan->conn,
4932                                                    bt_cb(skb)->force_active);
4933
4934                         hci_send_frame(hdev, skb);
4935                         hdev->acl_last_tx = jiffies;
4936
4937                         hdev->acl_cnt--;
4938                         chan->sent++;
4939                         chan->conn->sent++;
4940                 }
4941         }
4942
4943         if (cnt != hdev->acl_cnt)
4944                 hci_prio_recalculate(hdev, ACL_LINK);
4945 }
4946
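/* Block-based ACL scheduler, used with HCI_FLOW_CTL_MODE_BLOCK_BASED
 * (e.g. on AMP controllers): credit is accounted in buffer blocks
 * rather than whole packets, so each frame is charged __get_blocks()
 * worth of credit against both the quote and hdev->block_cnt.
 */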
4947 static void hci_sched_acl_blk(struct hci_dev *hdev)
4948 {
4949         unsigned int cnt = hdev->block_cnt;
4950         struct hci_chan *chan;
4951         struct sk_buff *skb;
4952         int quote;
4953         u8 type;
4954
4955         __check_timeout(hdev, cnt);
4956
4957         BT_DBG("%s", hdev->name);
4958
4959         if (hdev->dev_type == HCI_AMP)
4960                 type = AMP_LINK;
4961         else
4962                 type = ACL_LINK;
4963
4964         while (hdev->block_cnt > 0 &&
4965                (chan = hci_chan_sent(hdev, type, &quote))) {
4966                 u32 priority = (skb_peek(&chan->data_q))->priority;
4967                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4968                         int blocks;
4969
4970                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4971                                skb->len, skb->priority);
4972
4973                         /* Stop if priority has changed */
4974                         if (skb->priority < priority)
4975                                 break;
4976
4977                         skb = skb_dequeue(&chan->data_q);
4978
4979                         blocks = __get_blocks(hdev, skb);
4980                         if (blocks > hdev->block_cnt)
4981                                 return;
4982
4983                         hci_conn_enter_active_mode(chan->conn,
4984                                                    bt_cb(skb)->force_active);
4985
4986                         hci_send_frame(hdev, skb);
4987                         hdev->acl_last_tx = jiffies;
4988
4989                         hdev->block_cnt -= blocks;
4990                         quote -= blocks;
4991
4992                         chan->sent += blocks;
4993                         chan->conn->sent += blocks;
4994                 }
4995         }
4996
4997         if (cnt != hdev->block_cnt)
4998                 hci_prio_recalculate(hdev, type);
4999 }
5000
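/* Dispatch to the packet- or block-based ACL scheduler according to
 * the flow control mode the controller operates in.
 */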
5001 static void hci_sched_acl(struct hci_dev *hdev)
5002 {
5003         BT_DBG("%s", hdev->name);
5004
5005         /* No ACL link over BR/EDR controller */
5006         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5007                 return;
5008
5009         /* No AMP link over AMP controller */
5010         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5011                 return;
5012
5013         switch (hdev->flow_ctl_mode) {
5014         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5015                 hci_sched_acl_pkt(hdev);
5016                 break;
5017
5018         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5019                 hci_sched_acl_blk(hdev);
5020                 break;
5021         }
5022 }
5023
5024 /* Schedule SCO */
5025 static void hci_sched_sco(struct hci_dev *hdev)
5026 {
5027         struct hci_conn *conn;
5028         struct sk_buff *skb;
5029         int quote;
5030
5031         BT_DBG("%s", hdev->name);
5032
5033         if (!hci_conn_num(hdev, SCO_LINK))
5034                 return;
5035
5036         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5037                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5038                         BT_DBG("skb %p len %d", skb, skb->len);
5039                         hci_send_frame(hdev, skb);
5040
5041                         conn->sent++;
5042                         if (conn->sent == ~0)
5043                                 conn->sent = 0;
5044                 }
5045         }
5046 }
5047
5048 static void hci_sched_esco(struct hci_dev *hdev)
5049 {
5050         struct hci_conn *conn;
5051         struct sk_buff *skb;
5052         int quote;
5053
5054         BT_DBG("%s", hdev->name);
5055
5056         if (!hci_conn_num(hdev, ESCO_LINK))
5057                 return;
5058
5059         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5060                                                      &quote))) {
5061                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5062                         BT_DBG("skb %p len %d", skb, skb->len);
5063                         hci_send_frame(hdev, skb);
5064
5065                         conn->sent++;
5066                         if (conn->sent == ~0)
5067                                 conn->sent = 0;
5068                 }
5069         }
5070 }
5071
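/* LE scheduler.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL pool, which is why the credit consumed
 * here is written back to either le_cnt or acl_cnt at the end.
 */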
5072 static void hci_sched_le(struct hci_dev *hdev)
5073 {
5074         struct hci_chan *chan;
5075         struct sk_buff *skb;
5076         int quote, cnt, tmp;
5077
5078         BT_DBG("%s", hdev->name);
5079
5080         if (!hci_conn_num(hdev, LE_LINK))
5081                 return;
5082
5083         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5084                 /* LE tx timeout must be longer than maximum
5085                  * link supervision timeout (40.9 seconds) */
5086                 if (!hdev->le_cnt && hdev->le_pkts &&
5087                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5088                         hci_link_tx_to(hdev, LE_LINK);
5089         }
5090
5091         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5092         tmp = cnt;
5093         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5094                 u32 priority = (skb_peek(&chan->data_q))->priority;
5095                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5096                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5097                                skb->len, skb->priority);
5098
5099                         /* Stop if priority has changed */
5100                         if (skb->priority < priority)
5101                                 break;
5102
5103                         skb = skb_dequeue(&chan->data_q);
5104
5105                         hci_send_frame(hdev, skb);
5106                         hdev->le_last_tx = jiffies;
5107
5108                         cnt--;
5109                         chan->sent++;
5110                         chan->conn->sent++;
5111                 }
5112         }
5113
5114         if (hdev->le_pkts)
5115                 hdev->le_cnt = cnt;
5116         else
5117                 hdev->acl_cnt = cnt;
5118
5119         if (cnt != tmp)
5120                 hci_prio_recalculate(hdev, LE_LINK);
5121 }
5122
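/* TX work item: run the per-link-type schedulers, unless a user
 * channel owns the device, and then flush any raw packets queued
 * directly by sockets.
 */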
5123 static void hci_tx_work(struct work_struct *work)
5124 {
5125         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5126         struct sk_buff *skb;
5127
5128         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5129                hdev->sco_cnt, hdev->le_cnt);
5130
5131         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5132                 /* Schedule queues and send pending frames to the HCI driver */
5133                 hci_sched_acl(hdev);
5134                 hci_sched_sco(hdev);
5135                 hci_sched_esco(hdev);
5136                 hci_sched_le(hdev);
5137         }
5138
5139         /* Send next queued raw (unknown type) packet */
5140         while ((skb = skb_dequeue(&hdev->raw_q)))
5141                 hci_send_frame(hdev, skb);
5142 }
5143
5144 /* ----- HCI RX task (incoming data processing) ----- */
5145
5146 /* ACL data packet */
5147 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5148 {
5149         struct hci_acl_hdr *hdr = (void *) skb->data;
5150         struct hci_conn *conn;
5151         __u16 handle, flags;
5152
5153         skb_pull(skb, HCI_ACL_HDR_SIZE);
5154
5155         handle = __le16_to_cpu(hdr->handle);
5156         flags  = hci_flags(handle);
5157         handle = hci_handle(handle);
5158
5159         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5160                handle, flags);
5161
5162         hdev->stat.acl_rx++;
5163
5164         hci_dev_lock(hdev);
5165         conn = hci_conn_hash_lookup_handle(hdev, handle);
5166         hci_dev_unlock(hdev);
5167
5168         if (conn) {
5169                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5170
5171                 /* Send to upper protocol */
5172                 l2cap_recv_acldata(conn, skb, flags);
5173                 return;
5174         } else {
5175                 BT_ERR("%s ACL packet for unknown connection handle %d",
5176                        hdev->name, handle);
5177         }
5178
5179         kfree_skb(skb);
5180 }
5181
5182 /* SCO data packet */
5183 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5184 {
5185         struct hci_sco_hdr *hdr = (void *) skb->data;
5186         struct hci_conn *conn;
5187         __u16 handle;
5188
5189         skb_pull(skb, HCI_SCO_HDR_SIZE);
5190
5191         handle = __le16_to_cpu(hdr->handle);
5192
5193         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5194
5195         hdev->stat.sco_rx++;
5196
5197         hci_dev_lock(hdev);
5198         conn = hci_conn_hash_lookup_handle(hdev, handle);
5199         hci_dev_unlock(hdev);
5200
5201         if (conn) {
5202                 /* Send to upper protocol */
5203                 sco_recv_scodata(conn, skb);
5204                 return;
5205         } else {
5206                 BT_ERR("%s SCO packet for unknown connection handle %d",
5207                        hdev->name, handle);
5208         }
5209
5210         kfree_skb(skb);
5211 }
5212
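/* A request is complete when the command queue is empty or when the
 * next queued command is flagged as the start of a new request.
 */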
5213 static bool hci_req_is_complete(struct hci_dev *hdev)
5214 {
5215         struct sk_buff *skb;
5216
5217         skb = skb_peek(&hdev->cmd_q);
5218         if (!skb)
5219                 return true;
5220
5221         return bt_cb(skb)->req.start;
5222 }
5223
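/* Re-queue a clone of the last sent command at the head of the command
 * queue so it gets transmitted again.  HCI_OP_RESET is deliberately
 * never resent.
 */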
5224 static void hci_resend_last(struct hci_dev *hdev)
5225 {
5226         struct hci_command_hdr *sent;
5227         struct sk_buff *skb;
5228         u16 opcode;
5229
5230         if (!hdev->sent_cmd)
5231                 return;
5232
5233         sent = (void *) hdev->sent_cmd->data;
5234         opcode = __le16_to_cpu(sent->opcode);
5235         if (opcode == HCI_OP_RESET)
5236                 return;
5237
5238         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5239         if (!skb)
5240                 return;
5241
5242         skb_queue_head(&hdev->cmd_q, skb);
5243         queue_work(hdev->workqueue, &hdev->cmd_work);
5244 }
5245
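/* Called from event processing when a command completes or fails.  If
 * this ends the current request - because it was the last command or
 * because it returned an error - the request's complete callback is
 * located (on hdev->sent_cmd for the final command, otherwise on one of
 * the queued commands flushed below) and invoked exactly once.
 */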
5246 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5247 {
5248         hci_req_complete_t req_complete = NULL;
5249         struct sk_buff *skb;
5250         unsigned long flags;
5251
5252         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5253
5254         /* If the completed command doesn't match the last one that was
5255          * sent, we need to do special handling of it.
5256          */
5257         if (!hci_sent_cmd_data(hdev, opcode)) {
5258                 /* Some CSR based controllers generate a spontaneous
5259                  * reset complete event during init and any pending
5260                  * command will never be completed. In such a case we
5261                  * need to resend whatever was the last sent
5262                  * command.
5263                  */
5264                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5265                         hci_resend_last(hdev);
5266
5267                 return;
5268         }
5269
5270         /* If the command succeeded and there's still more commands in
5271          * this request the request is not yet complete.
5272          */
5273         if (!status && !hci_req_is_complete(hdev))
5274                 return;
5275
5276         /* If this was the last command in a request the complete
5277          * callback would be found in hdev->sent_cmd instead of the
5278          * command queue (hdev->cmd_q).
5279          */
5280         if (hdev->sent_cmd) {
5281                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5282
5283                 if (req_complete) {
5284                         /* We must set the complete callback to NULL to
5285                          * avoid calling the callback more than once if
5286                          * this function gets called again.
5287                          */
5288                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5289
5290                         goto call_complete;
5291                 }
5292         }
5293
5294         /* Remove all pending commands belonging to this request */
5295         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5296         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5297                 if (bt_cb(skb)->req.start) {
5298                         __skb_queue_head(&hdev->cmd_q, skb);
5299                         break;
5300                 }
5301
5302                 req_complete = bt_cb(skb)->req.complete;
5303                 kfree_skb(skb);
5304         }
5305         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5306
5307 call_complete:
5308         if (req_complete)
5309                 req_complete(hdev, status);
5310 }
5311
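/* RX work item: each frame from the driver is first copied to the
 * monitor and, in promiscuous mode, to raw HCI sockets.  Kernel-side
 * processing is skipped entirely in user channel mode, data packets
 * are dropped while HCI_INIT is set, and everything else is dispatched
 * by packet type.
 */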
5312 static void hci_rx_work(struct work_struct *work)
5313 {
5314         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5315         struct sk_buff *skb;
5316
5317         BT_DBG("%s", hdev->name);
5318
5319         while ((skb = skb_dequeue(&hdev->rx_q))) {
5320                 /* Send copy to monitor */
5321                 hci_send_to_monitor(hdev, skb);
5322
5323                 if (atomic_read(&hdev->promisc)) {
5324                         /* Send copy to the sockets */
5325                         hci_send_to_sock(hdev, skb);
5326                 }
5327
5328                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5329                         kfree_skb(skb);
5330                         continue;
5331                 }
5332
5333                 if (test_bit(HCI_INIT, &hdev->flags)) {
5334                         /* Don't process data packets in this state. */
5335                         switch (bt_cb(skb)->pkt_type) {
5336                         case HCI_ACLDATA_PKT:
5337                         case HCI_SCODATA_PKT:
5338                                 kfree_skb(skb);
5339                                 continue;
5340                         }
5341                 }
5342
5343                 /* Process frame */
5344                 switch (bt_cb(skb)->pkt_type) {
5345                 case HCI_EVENT_PKT:
5346                         BT_DBG("%s Event packet", hdev->name);
5347                         hci_event_packet(hdev, skb);
5348                         break;
5349
5350                 case HCI_ACLDATA_PKT:
5351                         BT_DBG("%s ACL data packet", hdev->name);
5352                         hci_acldata_packet(hdev, skb);
5353                         break;
5354
5355                 case HCI_SCODATA_PKT:
5356                         BT_DBG("%s SCO data packet", hdev->name);
5357                         hci_scodata_packet(hdev, skb);
5358                         break;
5359
5360                 default:
5361                         kfree_skb(skb);
5362                         break;
5363                 }
5364         }
5365 }
5366
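/* Command work item: when the controller has command credit, send the
 * next queued command, keep a clone in hdev->sent_cmd for matching the
 * eventual command complete event, and arm the command timeout unless
 * a reset is in flight.
 */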
5367 static void hci_cmd_work(struct work_struct *work)
5368 {
5369         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5370         struct sk_buff *skb;
5371
5372         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5373                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5374
5375         /* Send queued commands */
5376         if (atomic_read(&hdev->cmd_cnt)) {
5377                 skb = skb_dequeue(&hdev->cmd_q);
5378                 if (!skb)
5379                         return;
5380
5381                 kfree_skb(hdev->sent_cmd);
5382
5383                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5384                 if (hdev->sent_cmd) {
5385                         atomic_dec(&hdev->cmd_cnt);
5386                         hci_send_frame(hdev, skb);
5387                         if (test_bit(HCI_RESET, &hdev->flags))
5388                                 cancel_delayed_work(&hdev->cmd_timer);
5389                         else
5390                                 schedule_delayed_work(&hdev->cmd_timer,
5391                                                       HCI_CMD_TIMEOUT);
5392                 } else {
5393                         skb_queue_head(&hdev->cmd_q, skb);
5394                         queue_work(hdev->workqueue, &hdev->cmd_work);
5395                 }
5396         }
5397 }
5398
5399 void hci_req_add_le_scan_disable(struct hci_request *req)
5400 {
5401         struct hci_cp_le_set_scan_enable cp;
5402
5403         memset(&cp, 0, sizeof(cp));
5404         cp.enable = LE_SCAN_DISABLE;
5405         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5406 }
5407
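/* Append the two commands that start LE passive scanning - scan
 * parameters followed by scan enable with duplicate filtering - to an
 * existing request.
 */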
5408 void hci_req_add_le_passive_scan(struct hci_request *req)
5409 {
5410         struct hci_cp_le_set_scan_param param_cp;
5411         struct hci_cp_le_set_scan_enable enable_cp;
5412         struct hci_dev *hdev = req->hdev;
5413         u8 own_addr_type;
5414
5415         /* Set require_privacy to false since no SCAN_REQ packets are sent
5416          * during passive scanning. Not using an unresolvable address
5417          * here is important so that peer devices using direct
5418          * advertising with our address will be correctly reported
5419          * by the controller.
5420          */
5421         if (hci_update_random_address(req, false, &own_addr_type))
5422                 return;
5423
5424         memset(&param_cp, 0, sizeof(param_cp));
5425         param_cp.type = LE_SCAN_PASSIVE;
5426         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5427         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5428         param_cp.own_address_type = own_addr_type;
5429         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5430                     &param_cp);
5431
5432         memset(&enable_cp, 0, sizeof(enable_cp));
5433         enable_cp.enable = LE_SCAN_ENABLE;
5434         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5435         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5436                     &enable_cp);
5437 }
5438
5439 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5440 {
5441         if (status)
5442                 BT_DBG("HCI request failed to update background scanning: "
5443                        "status 0x%2.2x", status);
5444 }
5445
5446 /* This function controls the background scanning based on hdev->pend_le_conns
5447  * list. If there are pending LE connections we start the background scanning,
5448  * otherwise we stop it.
5449  *
5450  * This function requires that the caller holds hdev->lock.
5451  */
5452 void hci_update_background_scan(struct hci_dev *hdev)
5453 {
5454         struct hci_request req;
5455         struct hci_conn *conn;
5456         int err;
5457
5458         if (!test_bit(HCI_UP, &hdev->flags) ||
5459             test_bit(HCI_INIT, &hdev->flags) ||
5460             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5461             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5462             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5463             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5464                 return;
5465
5466         /* No point in doing scanning if LE support hasn't been enabled */
5467         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5468                 return;
5469
5470         /* If discovery is active don't interfere with it */
5471         if (hdev->discovery.state != DISCOVERY_STOPPED)
5472                 return;
5473
5474         hci_req_init(&req, hdev);
5475
5476         if (list_empty(&hdev->pend_le_conns) &&
5477             list_empty(&hdev->pend_le_reports)) {
5478                 /* If there are no pending LE connections or devices
5479                  * to be scanned for, we should stop the background
5480                  * scanning.
5481                  */
5482
5483                 /* If controller is not scanning we are done. */
5484                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5485                         return;
5486
5487                 hci_req_add_le_scan_disable(&req);
5488
5489                 BT_DBG("%s stopping background scanning", hdev->name);
5490         } else {
5491                 /* If there is at least one pending LE connection, we should
5492                  * keep the background scan running.
5493                  */
5494
5495                 /* If controller is connecting, we should not start scanning
5496                  * since some controllers are not able to scan and connect at
5497                  * the same time.
5498                  */
5499                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5500                 if (conn)
5501                         return;
5502
5503                 /* If controller is currently scanning, we stop it to ensure we
5504                  * don't miss any advertising (due to duplicates filter).
5505                  */
5506                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5507                         hci_req_add_le_scan_disable(&req);
5508
5509                 hci_req_add_le_passive_scan(&req);
5510
5511                 BT_DBG("%s starting background scanning", hdev->name);
5512         }
5513
5514         err = hci_req_run(&req, update_background_scan_complete);
5515         if (err)
5516                 BT_ERR("Failed to run HCI request: err %d", err);
5517 }