/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
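
/* A typical toggle from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and the controller is hci0:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' puts a running controller into Device Under Test mode
 * via HCI_OP_ENABLE_DUT_MODE; writing 'N' leaves it again by issuing
 * HCI_OP_RESET. The first status byte of the returned event decides
 * whether the HCI_DUT_MODE flag is flipped.
 */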

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
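
/* The read-only entries that follow all use the same seq_file pattern:
 * a *_show() callback dumps state under hci_dev_lock(), single_open()
 * binds it to the hci_dev stashed in the inode's private data, and
 * seq_read/seq_lseek/single_release fill out the file_operations.
 */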

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
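
/* DEFINE_SIMPLE_ATTRIBUTE() builds the whole file_operations from a
 * get/set pair plus a printf-style format for the value, so each of
 * the integer debugfs attributes below only has to implement the two
 * accessors (and, where needed, a range check in the setter). Passing
 * NULL for the setter, as above, makes the attribute read-only.
 */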

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};
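
/* Note the precondition is the inverse of dut_mode_write(): forcing
 * Secure Connections support may only be toggled while the device is
 * down (-EBUSY once HCI_UP is set), because the flag is consumed by
 * the init sequence (see hci_init4_req() below), whereas DUT mode
 * needs a running controller to talk to.
 */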

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");
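
/* The LE advertising channel map is a three-bit field: bit 0 enables
 * channel 37, bit 1 channel 38 and bit 2 channel 39, which is why the
 * setter above accepts only 0x01-0x07 (at least one channel enabled).
 */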

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
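
/* A synchronous request sits in HCI_REQ_PEND until the completion
 * callback above marks it HCI_REQ_DONE or a cancel marks it
 * HCI_REQ_CANCELED; both wake the waiter sleeping on req_wait_q. A
 * status still pending after the wait is treated as a timeout by the
 * callers below.
 */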

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
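
/* A minimal sketch of calling the synchronous helper (mirroring the
 * pattern in dut_mode_write() above; req_lock held, opcode chosen
 * purely for illustration):
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ...
 *      kfree_skb(skb);
 *
 * On success the returned skb holds the matching Command Complete
 * parameters and must be freed by the caller.
 */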

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20 s) */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
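
/* Inquiry mode values per the Write Inquiry Mode command: 0x00 is
 * standard inquiry, 0x01 inquiry with RSSI and 0x02 extended inquiry.
 * The manufacturer/revision special cases above cover controllers
 * that handle RSSI inquiry results without advertising the matching
 * LMP feature bit.
 */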

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1475
1476 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1477 {
1478         struct hci_dev *hdev = req->hdev;
1479
1480         if (lmp_bredr_capable(hdev))
1481                 bredr_setup(req);
1482         else
1483                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1484
1485         if (lmp_le_capable(hdev))
1486                 le_setup(req);
1487
1488         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1489          * local supported commands HCI command.
1490          */
1491         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1492                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1493
1494         if (lmp_ssp_capable(hdev)) {
1495                 /* When SSP is available, then the host features page
1496                  * should also be available as well. However some
1497                  * controllers list the max_page as 0 as long as SSP
1498                  * has not been enabled. To achieve proper debugging
1499                  * output, force the minimum max_page to 1 at least.
1500                  */
1501                 hdev->max_page = 0x01;
1502
1503                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1504                         u8 mode = 0x01;
1505                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1506                                     sizeof(mode), &mode);
1507                 } else {
1508                         struct hci_cp_write_eir cp;
1509
1510                         memset(hdev->eir, 0, sizeof(hdev->eir));
1511                         memset(&cp, 0, sizeof(cp));
1512
1513                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1514                 }
1515         }
1516
1517         if (lmp_inq_rssi_capable(hdev))
1518                 hci_setup_inquiry_mode(req);
1519
1520         if (lmp_inq_tx_pwr_capable(hdev))
1521                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1522
1523         if (lmp_ext_feat_capable(hdev)) {
1524                 struct hci_cp_read_local_ext_features cp;
1525
1526                 cp.page = 0x01;
1527                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1528                             sizeof(cp), &cp);
1529         }
1530
1531         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1532                 u8 enable = 1;
1533                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1534                             &enable);
1535         }
1536 }
1537
1538 static void hci_setup_link_policy(struct hci_request *req)
1539 {
1540         struct hci_dev *hdev = req->hdev;
1541         struct hci_cp_write_def_link_policy cp;
1542         u16 link_policy = 0;
1543
1544         if (lmp_rswitch_capable(hdev))
1545                 link_policy |= HCI_LP_RSWITCH;
1546         if (lmp_hold_capable(hdev))
1547                 link_policy |= HCI_LP_HOLD;
1548         if (lmp_sniff_capable(hdev))
1549                 link_policy |= HCI_LP_SNIFF;
1550         if (lmp_park_capable(hdev))
1551                 link_policy |= HCI_LP_PARK;
1552
1553         cp.policy = cpu_to_le16(link_policy);
1554         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1555 }
1556
1557 static void hci_set_le_support(struct hci_request *req)
1558 {
1559         struct hci_dev *hdev = req->hdev;
1560         struct hci_cp_write_le_host_supported cp;
1561
1562         /* LE-only devices do not support explicit enablement */
1563         if (!lmp_bredr_capable(hdev))
1564                 return;
1565
1566         memset(&cp, 0, sizeof(cp));
1567
1568         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1569                 cp.le = 0x01;
1570                 cp.simul = lmp_le_br_capable(hdev);
1571         }
1572
1573         if (cp.le != lmp_host_le_capable(hdev))
1574                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1575                             &cp);
1576 }
1577
1578 static void hci_set_event_mask_page_2(struct hci_request *req)
1579 {
1580         struct hci_dev *hdev = req->hdev;
1581         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1582
1583         /* If Connectionless Slave Broadcast master role is supported
1584          * enable all necessary events for it.
1585          */
1586         if (lmp_csb_master_capable(hdev)) {
1587                 events[1] |= 0x40;      /* Triggered Clock Capture */
1588                 events[1] |= 0x80;      /* Synchronization Train Complete */
1589                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1590                 events[2] |= 0x20;      /* CSB Channel Map Change */
1591         }
1592
1593         /* If Connectionless Slave Broadcast slave role is supported
1594          * enable all necessary events for it.
1595          */
1596         if (lmp_csb_slave_capable(hdev)) {
1597                 events[2] |= 0x01;      /* Synchronization Train Received */
1598                 events[2] |= 0x02;      /* CSB Receive */
1599                 events[2] |= 0x04;      /* CSB Timeout */
1600                 events[2] |= 0x08;      /* Truncated Page Complete */
1601         }
1602
1603         /* Enable Authenticated Payload Timeout Expired event if supported */
1604         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1605                 events[2] |= 0x80;
1606
1607         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1608 }
1609
1610 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1611 {
1612         struct hci_dev *hdev = req->hdev;
1613         u8 p;
1614
1615         hci_setup_event_mask(req);
1616
1617         /* Some Broadcom based Bluetooth controllers do not support the
1618          * Delete Stored Link Key command. They are clearly indicating its
1619          * absence in the bit mask of supported commands.
1620          *
1621          * Check the supported commands and only if the the command is marked
1622          * as supported send it. If not supported assume that the controller
1623          * does not have actual support for stored link keys which makes this
1624          * command redundant anyway.
1625          *
1626          * Some controllers indicate that they support handling deleting
1627          * stored link keys, but they don't. The quirk lets a driver
1628          * just disable this command.
1629          */
1630         if (hdev->commands[6] & 0x80 &&
1631             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1632                 struct hci_cp_delete_stored_link_key cp;
1633
1634                 bacpy(&cp.bdaddr, BDADDR_ANY);
1635                 cp.delete_all = 0x01;
1636                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1637                             sizeof(cp), &cp);
1638         }
1639
1640         if (hdev->commands[5] & 0x10)
1641                 hci_setup_link_policy(req);
1642
1643         if (lmp_le_capable(hdev)) {
1644                 u8 events[8];
1645
1646                 memset(events, 0, sizeof(events));
1647                 events[0] = 0x0f;
1648
1649                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1650                         events[0] |= 0x10;      /* LE Long Term Key Request */
1651
1652                 /* If controller supports the Connection Parameters Request
1653                  * Link Layer Procedure, enable the corresponding event.
1654                  */
1655                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1656                         events[0] |= 0x20;      /* LE Remote Connection
1657                                                  * Parameter Request
1658                                                  */
1659
1660                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1661                             events);
1662
1663                 if (hdev->commands[25] & 0x40) {
1664                         /* Read LE Advertising Channel TX Power */
1665                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1666                 }
1667
1668                 hci_set_le_support(req);
1669         }
1670
1671         /* Read features beyond page 1 if available */
1672         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1673                 struct hci_cp_read_local_ext_features cp;
1674
1675                 cp.page = p;
1676                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1677                             sizeof(cp), &cp);
1678         }
1679 }
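
/* Annotation, not original to this file: hdev->commands[] holds the
 * Read Local Supported Commands bitmap, so "octet N, bit B" from the
 * specification's table is tested as hdev->commands[N] & (1 << B).
 * The hdev->commands[6] & 0x80 check above (octet 6, bit 7) is the
 * Delete Stored Link Key command, and hdev->commands[25] & 0x40
 * (octet 25, bit 6) is LE Read Advertising Channel TX Power.
 */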
1680
1681 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1682 {
1683         struct hci_dev *hdev = req->hdev;
1684
1685         /* Set event mask page 2 if the HCI command for it is supported */
1686         if (hdev->commands[22] & 0x04)
1687                 hci_set_event_mask_page_2(req);
1688
1689         /* Check for Synchronization Train support */
1690         if (lmp_sync_train_capable(hdev))
1691                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1692
1693         /* Enable Secure Connections if supported and configured */
1694         if ((lmp_sc_capable(hdev) ||
1695              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1696             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1697                 u8 support = 0x01;
1698                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1699                             sizeof(support), &support);
1700         }
1701 }
1702
1703 static int __hci_init(struct hci_dev *hdev)
1704 {
1705         int err;
1706
1707         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1708         if (err < 0)
1709                 return err;
1710
1711         /* The Device Under Test (DUT) mode is special and available for
1712          * all controller types, so create its debugfs entry early on.
1713          */
1714         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1715                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1716                                     &dut_mode_fops);
1717         }
1718
1719         /* The HCI_BREDR device type covers single-mode LE, single-mode
1720          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1721          * only need the first stage of init.
1722          */
1723         if (hdev->dev_type != HCI_BREDR)
1724                 return 0;
1725
1726         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1727         if (err < 0)
1728                 return err;
1729
1730         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1731         if (err < 0)
1732                 return err;
1733
1734         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1735         if (err < 0)
1736                 return err;
1737
1738         /* Only create debugfs entries during the initial setup
1739          * phase and not every time the controller gets powered on.
1740          */
1741         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1742                 return 0;
1743
1744         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1745                             &features_fops);
1746         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1747                            &hdev->manufacturer);
1748         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1749         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1750         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1751                             &blacklist_fops);
1752         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1753                             &whitelist_fops);
1754         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1755
1756         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1757                             &conn_info_min_age_fops);
1758         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1759                             &conn_info_max_age_fops);
1760
1761         if (lmp_bredr_capable(hdev)) {
1762                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1763                                     hdev, &inquiry_cache_fops);
1764                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1765                                     hdev, &link_keys_fops);
1766                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1767                                     hdev, &dev_class_fops);
1768                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1769                                     hdev, &voice_setting_fops);
1770         }
1771
1772         if (lmp_ssp_capable(hdev)) {
1773                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1774                                     hdev, &auto_accept_delay_fops);
1775                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1776                                     hdev, &force_sc_support_fops);
1777                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1778                                     hdev, &sc_only_mode_fops);
1779         }
1780
1781         if (lmp_sniff_capable(hdev)) {
1782                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1783                                     hdev, &idle_timeout_fops);
1784                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1785                                     hdev, &sniff_min_interval_fops);
1786                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1787                                     hdev, &sniff_max_interval_fops);
1788         }
1789
1790         if (lmp_le_capable(hdev)) {
1791                 debugfs_create_file("identity", 0400, hdev->debugfs,
1792                                     hdev, &identity_fops);
1793                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1794                                     hdev, &rpa_timeout_fops);
1795                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1796                                     hdev, &random_address_fops);
1797                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1798                                     hdev, &static_address_fops);
1799
1800                 /* For controllers with a public address, provide a debug
1801                  * option to force the usage of the configured static
1802                  * address. By default the public address is used.
1803                  */
1804                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1805                         debugfs_create_file("force_static_address", 0644,
1806                                             hdev->debugfs, hdev,
1807                                             &force_static_address_fops);
1808
1809                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1810                                   &hdev->le_white_list_size);
1811                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1812                                     &white_list_fops);
1813                 debugfs_create_file("identity_resolving_keys", 0400,
1814                                     hdev->debugfs, hdev,
1815                                     &identity_resolving_keys_fops);
1816                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1817                                     hdev, &long_term_keys_fops);
1818                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1819                                     hdev, &conn_min_interval_fops);
1820                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1821                                     hdev, &conn_max_interval_fops);
1822                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1823                                     hdev, &conn_latency_fops);
1824                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1825                                     hdev, &supervision_timeout_fops);
1826                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1827                                     hdev, &adv_channel_map_fops);
1828                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1829                                     &device_list_fops);
1830                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1831                                    hdev->debugfs,
1832                                    &hdev->discov_interleaved_timeout);
1833         }
1834
1835         return 0;
1836 }
1837
1838 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1839 {
1840         struct hci_dev *hdev = req->hdev;
1841
1842         BT_DBG("%s %ld", hdev->name, opt);
1843
1844         /* Reset */
1845         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1846                 hci_reset_req(req, 0);
1847
1848         /* Read Local Version */
1849         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1850
1851         /* Read BD Address */
1852         if (hdev->set_bdaddr)
1853                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1854 }
1855
1856 static int __hci_unconf_init(struct hci_dev *hdev)
1857 {
1858         int err;
1859
1860         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1861                 return 0;
1862
1863         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1864         if (err < 0)
1865                 return err;
1866
1867         return 0;
1868 }
1869
1870 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1871 {
1872         __u8 scan = opt;
1873
1874         BT_DBG("%s %x", req->hdev->name, scan);
1875
1876         /* Inquiry and Page scans */
1877         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1878 }
1879
1880 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1881 {
1882         __u8 auth = opt;
1883
1884         BT_DBG("%s %x", req->hdev->name, auth);
1885
1886         /* Authentication */
1887         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1888 }
1889
1890 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1891 {
1892         __u8 encrypt = opt;
1893
1894         BT_DBG("%s %x", req->hdev->name, encrypt);
1895
1896         /* Encryption */
1897         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1898 }
1899
1900 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1901 {
1902         __le16 policy = cpu_to_le16(opt);
1903
1904         BT_DBG("%s %x", req->hdev->name, policy);
1905
1906         /* Default link policy */
1907         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1908 }
1909
1910 /* Get HCI device by index.
1911  * Device is held on return. */
1912 struct hci_dev *hci_dev_get(int index)
1913 {
1914         struct hci_dev *hdev = NULL, *d;
1915
1916         BT_DBG("%d", index);
1917
1918         if (index < 0)
1919                 return NULL;
1920
1921         read_lock(&hci_dev_list_lock);
1922         list_for_each_entry(d, &hci_dev_list, list) {
1923                 if (d->id == index) {
1924                         hdev = hci_dev_hold(d);
1925                         break;
1926                 }
1927         }
1928         read_unlock(&hci_dev_list_lock);
1929         return hdev;
1930 }
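
/* Minimal usage sketch (hypothetical caller, for illustration only):
 * every successful hci_dev_get() must be balanced by hci_dev_put(),
 * otherwise the device reference is leaked.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("%s held", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */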
1931
1932 /* ---- Inquiry support ---- */
1933
1934 bool hci_discovery_active(struct hci_dev *hdev)
1935 {
1936         struct discovery_state *discov = &hdev->discovery;
1937
1938         switch (discov->state) {
1939         case DISCOVERY_FINDING:
1940         case DISCOVERY_RESOLVING:
1941                 return true;
1942
1943         default:
1944                 return false;
1945         }
1946 }
1947
1948 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1949 {
1950         int old_state = hdev->discovery.state;
1951
1952         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1953
1954         if (old_state == state)
1955                 return;
1956
1957         hdev->discovery.state = state;
1958
1959         switch (state) {
1960         case DISCOVERY_STOPPED:
1961                 hci_update_background_scan(hdev);
1962
1963                 if (old_state != DISCOVERY_STARTING)
1964                         mgmt_discovering(hdev, 0);
1965                 break;
1966         case DISCOVERY_STARTING:
1967                 break;
1968         case DISCOVERY_FINDING:
1969                 mgmt_discovering(hdev, 1);
1970                 break;
1971         case DISCOVERY_RESOLVING:
1972                 break;
1973         case DISCOVERY_STOPPING:
1974                 break;
1975         }
1976 }
1977
1978 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1979 {
1980         struct discovery_state *cache = &hdev->discovery;
1981         struct inquiry_entry *p, *n;
1982
1983         list_for_each_entry_safe(p, n, &cache->all, all) {
1984                 list_del(&p->all);
1985                 kfree(p);
1986         }
1987
1988         INIT_LIST_HEAD(&cache->unknown);
1989         INIT_LIST_HEAD(&cache->resolve);
1990 }
1991
1992 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1993                                                bdaddr_t *bdaddr)
1994 {
1995         struct discovery_state *cache = &hdev->discovery;
1996         struct inquiry_entry *e;
1997
1998         BT_DBG("cache %p, %pMR", cache, bdaddr);
1999
2000         list_for_each_entry(e, &cache->all, all) {
2001                 if (!bacmp(&e->data.bdaddr, bdaddr))
2002                         return e;
2003         }
2004
2005         return NULL;
2006 }
2007
2008 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2009                                                        bdaddr_t *bdaddr)
2010 {
2011         struct discovery_state *cache = &hdev->discovery;
2012         struct inquiry_entry *e;
2013
2014         BT_DBG("cache %p, %pMR", cache, bdaddr);
2015
2016         list_for_each_entry(e, &cache->unknown, list) {
2017                 if (!bacmp(&e->data.bdaddr, bdaddr))
2018                         return e;
2019         }
2020
2021         return NULL;
2022 }
2023
2024 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2025                                                        bdaddr_t *bdaddr,
2026                                                        int state)
2027 {
2028         struct discovery_state *cache = &hdev->discovery;
2029         struct inquiry_entry *e;
2030
2031         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2032
2033         list_for_each_entry(e, &cache->resolve, list) {
2034                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2035                         return e;
2036                 if (!bacmp(&e->data.bdaddr, bdaddr))
2037                         return e;
2038         }
2039
2040         return NULL;
2041 }
2042
2043 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2044                                       struct inquiry_entry *ie)
2045 {
2046         struct discovery_state *cache = &hdev->discovery;
2047         struct list_head *pos = &cache->resolve;
2048         struct inquiry_entry *p;
2049
2050         list_del(&ie->list);
2051
2052         list_for_each_entry(p, &cache->resolve, list) {
2053                 if (p->name_state != NAME_PENDING &&
2054                     abs(p->data.rssi) >= abs(ie->data.rssi))
2055                         break;
2056                 pos = &p->list;
2057         }
2058
2059         list_add(&ie->list, pos);
2060 }
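
/* Annotation, not original to this file: the insertion loop above
 * keeps the ->resolve list sorted by ascending abs(rssi). RSSI is a
 * negative dBm value, so a smaller magnitude means a stronger signal,
 * and remote name resolution is therefore attempted for the closest
 * devices first.
 */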
2061
2062 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2063                              bool name_known)
2064 {
2065         struct discovery_state *cache = &hdev->discovery;
2066         struct inquiry_entry *ie;
2067         u32 flags = 0;
2068
2069         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2070
2071         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2072
2073         if (!data->ssp_mode)
2074                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2075
2076         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2077         if (ie) {
2078                 if (!ie->data.ssp_mode)
2079                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2080
2081                 if (ie->name_state == NAME_NEEDED &&
2082                     data->rssi != ie->data.rssi) {
2083                         ie->data.rssi = data->rssi;
2084                         hci_inquiry_cache_update_resolve(hdev, ie);
2085                 }
2086
2087                 goto update;
2088         }
2089
2090         /* Entry not in the cache. Add new one. */
2091         ie = kzalloc(sizeof(struct inquiry_entry), GFP_KERNEL);
2092         if (!ie) {
2093                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2094                 goto done;
2095         }
2096
2097         list_add(&ie->all, &cache->all);
2098
2099         if (name_known) {
2100                 ie->name_state = NAME_KNOWN;
2101         } else {
2102                 ie->name_state = NAME_NOT_KNOWN;
2103                 list_add(&ie->list, &cache->unknown);
2104         }
2105
2106 update:
2107         if (name_known && ie->name_state != NAME_KNOWN &&
2108             ie->name_state != NAME_PENDING) {
2109                 ie->name_state = NAME_KNOWN;
2110                 list_del(&ie->list);
2111         }
2112
2113         memcpy(&ie->data, data, sizeof(*data));
2114         ie->timestamp = jiffies;
2115         cache->timestamp = jiffies;
2116
2117         if (ie->name_state == NAME_NOT_KNOWN)
2118                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2119
2120 done:
2121         return flags;
2122 }
2123
2124 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2125 {
2126         struct discovery_state *cache = &hdev->discovery;
2127         struct inquiry_info *info = (struct inquiry_info *) buf;
2128         struct inquiry_entry *e;
2129         int copied = 0;
2130
2131         list_for_each_entry(e, &cache->all, all) {
2132                 struct inquiry_data *data = &e->data;
2133
2134                 if (copied >= num)
2135                         break;
2136
2137                 bacpy(&info->bdaddr, &data->bdaddr);
2138                 info->pscan_rep_mode    = data->pscan_rep_mode;
2139                 info->pscan_period_mode = data->pscan_period_mode;
2140                 info->pscan_mode        = data->pscan_mode;
2141                 memcpy(info->dev_class, data->dev_class, 3);
2142                 info->clock_offset      = data->clock_offset;
2143
2144                 info++;
2145                 copied++;
2146         }
2147
2148         BT_DBG("cache %p, copied %d", cache, copied);
2149         return copied;
2150 }
2151
2152 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2153 {
2154         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2155         struct hci_dev *hdev = req->hdev;
2156         struct hci_cp_inquiry cp;
2157
2158         BT_DBG("%s", hdev->name);
2159
2160         if (test_bit(HCI_INQUIRY, &hdev->flags))
2161                 return;
2162
2163         /* Start Inquiry */
2164         memcpy(&cp.lap, &ir->lap, 3);
2165         cp.length  = ir->length;
2166         cp.num_rsp = ir->num_rsp;
2167         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2168 }
2169
2170 static int wait_inquiry(void *word)
2171 {
2172         schedule();
2173         return signal_pending(current);
2174 }
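
/* Annotation, not original to this file: wait_inquiry() is the
 * wait_on_bit() action used in hci_inquiry() below. It sleeps once
 * per wakeup and reports whether a signal is pending, which is what
 * lets the wait below fail with -EINTR.
 */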
2175
2176 int hci_inquiry(void __user *arg)
2177 {
2178         __u8 __user *ptr = arg;
2179         struct hci_inquiry_req ir;
2180         struct hci_dev *hdev;
2181         int err = 0, do_inquiry = 0, max_rsp;
2182         long timeo;
2183         __u8 *buf;
2184
2185         if (copy_from_user(&ir, ptr, sizeof(ir)))
2186                 return -EFAULT;
2187
2188         hdev = hci_dev_get(ir.dev_id);
2189         if (!hdev)
2190                 return -ENODEV;
2191
2192         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2193                 err = -EBUSY;
2194                 goto done;
2195         }
2196
2197         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2198                 err = -EOPNOTSUPP;
2199                 goto done;
2200         }
2201
2202         if (hdev->dev_type != HCI_BREDR) {
2203                 err = -EOPNOTSUPP;
2204                 goto done;
2205         }
2206
2207         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2208                 err = -EOPNOTSUPP;
2209                 goto done;
2210         }
2211
2212         hci_dev_lock(hdev);
2213         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2214             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2215                 hci_inquiry_cache_flush(hdev);
2216                 do_inquiry = 1;
2217         }
2218         hci_dev_unlock(hdev);
2219
2220         timeo = ir.length * msecs_to_jiffies(2000);
2221
2222         if (do_inquiry) {
2223                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2224                                    timeo);
2225                 if (err < 0)
2226                         goto done;
2227
2228                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2229                  * cleared). If it is interrupted by a signal, return -EINTR.
2230                  */
2231                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2232                                 TASK_INTERRUPTIBLE))
2233                         return -EINTR;
2234         }
2235
2236         /* For an unlimited number of responses, use a buffer with
2237          * 255 entries.
2238          */
2239         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2240
2241         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2242          * buffer and copy it to user space afterwards.
2243          */
2244         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2245         if (!buf) {
2246                 err = -ENOMEM;
2247                 goto done;
2248         }
2249
2250         hci_dev_lock(hdev);
2251         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2252         hci_dev_unlock(hdev);
2253
2254         BT_DBG("num_rsp %d", ir.num_rsp);
2255
2256         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2257                 ptr += sizeof(ir);
2258                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2259                                  ir.num_rsp))
2260                         err = -EFAULT;
2261         } else
2262                 err = -EFAULT;
2263
2264         kfree(buf);
2265
2266 done:
2267         hci_dev_put(hdev);
2268         return err;
2269 }
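
/* Userspace counterpart sketch (hedged example assuming BlueZ's
 * libbluetooth; not part of this file). The HCIINQUIRY ioctl serviced
 * above is normally reached through hci_inquiry(3) from
 * <bluetooth/hci_lib.h>:
 *
 *	inquiry_info *ii = malloc(255 * sizeof(inquiry_info));
 *	int dev_id = hci_get_route(NULL);
 *	int num_rsp = hci_inquiry(dev_id, 8, 255, NULL, &ii,
 *				  IREQ_CACHE_FLUSH);
 *
 * With ir.length == 8 the kernel side above waits up to 8 * 2 seconds
 * for the HCI_INQUIRY flag to clear, and IREQ_CACHE_FLUSH forces the
 * hci_inquiry_cache_flush() call at the top of the handler.
 */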
2270
2271 static int hci_dev_do_open(struct hci_dev *hdev)
2272 {
2273         int ret = 0;
2274
2275         BT_DBG("%s %p", hdev->name, hdev);
2276
2277         hci_req_lock(hdev);
2278
2279         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2280                 ret = -ENODEV;
2281                 goto done;
2282         }
2283
2284         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2285             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2286                 /* Check for rfkill but allow the HCI setup stage to
2287                  * proceed (which in itself doesn't cause any RF activity).
2288                  */
2289                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2290                         ret = -ERFKILL;
2291                         goto done;
2292                 }
2293
2294                 /* Check for valid public address or a configured static
2295          * random address, but let the HCI setup proceed to
2296                  * be able to determine if there is a public address
2297                  * or not.
2298                  *
2299                  * In case of user channel usage, it is not important
2300                  * if a public address or static random address is
2301                  * available.
2302                  *
2303                  * This check is only valid for BR/EDR controllers
2304                  * since AMP controllers do not have an address.
2305                  */
2306                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2307                     hdev->dev_type == HCI_BREDR &&
2308                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2309                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2310                         ret = -EADDRNOTAVAIL;
2311                         goto done;
2312                 }
2313         }
2314
2315         if (test_bit(HCI_UP, &hdev->flags)) {
2316                 ret = -EALREADY;
2317                 goto done;
2318         }
2319
2320         if (hdev->open(hdev)) {
2321                 ret = -EIO;
2322                 goto done;
2323         }
2324
2325         atomic_set(&hdev->cmd_cnt, 1);
2326         set_bit(HCI_INIT, &hdev->flags);
2327
2328         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2329                 if (hdev->setup)
2330                         ret = hdev->setup(hdev);
2331
2332                 /* The transport driver can set these quirks before
2333                  * creating the HCI device or in its setup callback.
2334                  *
2335                  * In case any of them is set, the controller has to
2336                  * start up as unconfigured.
2337                  */
2338                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2339                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2340                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2341
2342                 /* For an unconfigured controller it is required to
2343                  * read at least the version information provided by
2344                  * the Read Local Version Information command.
2345                  *
2346                  * If the set_bdaddr driver callback is provided, then
2347                  * also the original Bluetooth public device address
2348                  * will be read using the Read BD Address command.
2349                  */
2350                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2351                         ret = __hci_unconf_init(hdev);
2352         }
2353
2354         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2355                 /* If public address change is configured, ensure that
2356                  * the address gets programmed. If the driver does not
2357                  * support changing the public address, fail the power
2358                  * on procedure.
2359                  */
2360                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2361                     hdev->set_bdaddr)
2362                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2363                 else
2364                         ret = -EADDRNOTAVAIL;
2365         }
2366
2367         if (!ret) {
2368                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2369                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370                         ret = __hci_init(hdev);
2371         }
2372
2373         clear_bit(HCI_INIT, &hdev->flags);
2374
2375         if (!ret) {
2376                 hci_dev_hold(hdev);
2377                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2378                 set_bit(HCI_UP, &hdev->flags);
2379                 hci_notify(hdev, HCI_DEV_UP);
2380                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2381                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2382                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2383                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2384                     hdev->dev_type == HCI_BREDR) {
2385                         hci_dev_lock(hdev);
2386                         mgmt_powered(hdev, 1);
2387                         hci_dev_unlock(hdev);
2388                 }
2389         } else {
2390                 /* Init failed, cleanup */
2391                 flush_work(&hdev->tx_work);
2392                 flush_work(&hdev->cmd_work);
2393                 flush_work(&hdev->rx_work);
2394
2395                 skb_queue_purge(&hdev->cmd_q);
2396                 skb_queue_purge(&hdev->rx_q);
2397
2398                 if (hdev->flush)
2399                         hdev->flush(hdev);
2400
2401                 if (hdev->sent_cmd) {
2402                         kfree_skb(hdev->sent_cmd);
2403                         hdev->sent_cmd = NULL;
2404                 }
2405
2406                 hdev->close(hdev);
2407                 hdev->flags &= BIT(HCI_RAW);
2408         }
2409
2410 done:
2411         hci_req_unlock(hdev);
2412         return ret;
2413 }
2414
2415 /* ---- HCI ioctl helpers ---- */
2416
2417 int hci_dev_open(__u16 dev)
2418 {
2419         struct hci_dev *hdev;
2420         int err;
2421
2422         hdev = hci_dev_get(dev);
2423         if (!hdev)
2424                 return -ENODEV;
2425
2426         /* Devices that are marked as unconfigured can only be powered
2427          * up as user channel. Trying to bring them up as normal devices
2428          * will result in a failure. Only user channel operation is
2429          * possible.
2430          *
2431          * When this function is called for a user channel, the flag
2432          * HCI_USER_CHANNEL will be set first before attempting to
2433          * open the device.
2434          */
2435         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2436             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2437                 err = -EOPNOTSUPP;
2438                 goto done;
2439         }
2440
2441         /* We need to ensure that no other power on/off work is pending
2442          * before proceeding to call hci_dev_do_open. This is
2443          * particularly important if the setup procedure has not yet
2444          * completed.
2445          */
2446         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447                 cancel_delayed_work(&hdev->power_off);
2448
2449         /* After this call it is guaranteed that the setup procedure
2450          * has finished. This means that error conditions like RFKILL
2451          * or no valid public or static random address apply.
2452          */
2453         flush_workqueue(hdev->req_workqueue);
2454
2455         /* For controllers that do not use the management interface and
2456          * are brought up via the legacy ioctl, set the HCI_PAIRABLE bit
2457          * so that pairing works for them. Once the management interface
2458          * is in use this bit will be cleared again and userspace has
2459          * to explicitly enable it.
2460          */
2461         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2462             !test_bit(HCI_MGMT, &hdev->dev_flags))
2463                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2464
2465         err = hci_dev_do_open(hdev);
2466
2467 done:
2468         hci_dev_put(hdev);
2469         return err;
2470 }
2471
2472 /* This function requires the caller holds hdev->lock */
2473 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2474 {
2475         struct hci_conn_params *p;
2476
2477         list_for_each_entry(p, &hdev->le_conn_params, list)
2478                 list_del_init(&p->action);
2479
2480         BT_DBG("All LE pending actions cleared");
2481 }
2482
2483 static int hci_dev_do_close(struct hci_dev *hdev)
2484 {
2485         BT_DBG("%s %p", hdev->name, hdev);
2486
2487         cancel_delayed_work(&hdev->power_off);
2488
2489         hci_req_cancel(hdev, ENODEV);
2490         hci_req_lock(hdev);
2491
2492         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2493                 cancel_delayed_work_sync(&hdev->cmd_timer);
2494                 hci_req_unlock(hdev);
2495                 return 0;
2496         }
2497
2498         /* Flush RX and TX works */
2499         flush_work(&hdev->tx_work);
2500         flush_work(&hdev->rx_work);
2501
2502         if (hdev->discov_timeout > 0) {
2503                 cancel_delayed_work(&hdev->discov_off);
2504                 hdev->discov_timeout = 0;
2505                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2506                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2507         }
2508
2509         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2510                 cancel_delayed_work(&hdev->service_cache);
2511
2512         cancel_delayed_work_sync(&hdev->le_scan_disable);
2513
2514         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2515                 cancel_delayed_work_sync(&hdev->rpa_expired);
2516
2517         hci_dev_lock(hdev);
2518         hci_inquiry_cache_flush(hdev);
2519         hci_conn_hash_flush(hdev);
2520         hci_pend_le_actions_clear(hdev);
2521         hci_dev_unlock(hdev);
2522
2523         hci_notify(hdev, HCI_DEV_DOWN);
2524
2525         if (hdev->flush)
2526                 hdev->flush(hdev);
2527
2528         /* Reset device */
2529         skb_queue_purge(&hdev->cmd_q);
2530         atomic_set(&hdev->cmd_cnt, 1);
2531         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2532             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2533             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2534                 set_bit(HCI_INIT, &hdev->flags);
2535                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2536                 clear_bit(HCI_INIT, &hdev->flags);
2537         }
2538
2539         /* Flush cmd work */
2540         flush_work(&hdev->cmd_work);
2541
2542         /* Drop queues */
2543         skb_queue_purge(&hdev->rx_q);
2544         skb_queue_purge(&hdev->cmd_q);
2545         skb_queue_purge(&hdev->raw_q);
2546
2547         /* Drop last sent command */
2548         if (hdev->sent_cmd) {
2549                 cancel_delayed_work_sync(&hdev->cmd_timer);
2550                 kfree_skb(hdev->sent_cmd);
2551                 hdev->sent_cmd = NULL;
2552         }
2553
2554         kfree_skb(hdev->recv_evt);
2555         hdev->recv_evt = NULL;
2556
2557         /* After this point our queues are empty
2558          * and no tasks are scheduled. */
2559         hdev->close(hdev);
2560
2561         /* Clear flags */
2562         hdev->flags &= BIT(HCI_RAW);
2563         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2564
2565         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2566                 if (hdev->dev_type == HCI_BREDR) {
2567                         hci_dev_lock(hdev);
2568                         mgmt_powered(hdev, 0);
2569                         hci_dev_unlock(hdev);
2570                 }
2571         }
2572
2573         /* Controller radio is available but is currently powered down */
2574         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2575
2576         memset(hdev->eir, 0, sizeof(hdev->eir));
2577         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2578         bacpy(&hdev->random_addr, BDADDR_ANY);
2579
2580         hci_req_unlock(hdev);
2581
2582         hci_dev_put(hdev);
2583         return 0;
2584 }
2585
2586 int hci_dev_close(__u16 dev)
2587 {
2588         struct hci_dev *hdev;
2589         int err;
2590
2591         hdev = hci_dev_get(dev);
2592         if (!hdev)
2593                 return -ENODEV;
2594
2595         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2596                 err = -EBUSY;
2597                 goto done;
2598         }
2599
2600         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2601                 cancel_delayed_work(&hdev->power_off);
2602
2603         err = hci_dev_do_close(hdev);
2604
2605 done:
2606         hci_dev_put(hdev);
2607         return err;
2608 }
2609
2610 int hci_dev_reset(__u16 dev)
2611 {
2612         struct hci_dev *hdev;
2613         int ret = 0;
2614
2615         hdev = hci_dev_get(dev);
2616         if (!hdev)
2617                 return -ENODEV;
2618
2619         hci_req_lock(hdev);
2620
2621         if (!test_bit(HCI_UP, &hdev->flags)) {
2622                 ret = -ENETDOWN;
2623                 goto done;
2624         }
2625
2626         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2627                 ret = -EBUSY;
2628                 goto done;
2629         }
2630
2631         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2632                 ret = -EOPNOTSUPP;
2633                 goto done;
2634         }
2635
2636         /* Drop queues */
2637         skb_queue_purge(&hdev->rx_q);
2638         skb_queue_purge(&hdev->cmd_q);
2639
2640         hci_dev_lock(hdev);
2641         hci_inquiry_cache_flush(hdev);
2642         hci_conn_hash_flush(hdev);
2643         hci_dev_unlock(hdev);
2644
2645         if (hdev->flush)
2646                 hdev->flush(hdev);
2647
2648         atomic_set(&hdev->cmd_cnt, 1);
2649         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2650
2651         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2652
2653 done:
2654         hci_req_unlock(hdev);
2655         hci_dev_put(hdev);
2656         return ret;
2657 }
2658
2659 int hci_dev_reset_stat(__u16 dev)
2660 {
2661         struct hci_dev *hdev;
2662         int ret = 0;
2663
2664         hdev = hci_dev_get(dev);
2665         if (!hdev)
2666                 return -ENODEV;
2667
2668         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2669                 ret = -EBUSY;
2670                 goto done;
2671         }
2672
2673         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2674                 ret = -EOPNOTSUPP;
2675                 goto done;
2676         }
2677
2678         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2679
2680 done:
2681         hci_dev_put(hdev);
2682         return ret;
2683 }
2684
2685 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2686 {
2687         bool conn_changed, discov_changed;
2688
2689         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2690
2691         if ((scan & SCAN_PAGE))
2692                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2693                                                  &hdev->dev_flags);
2694         else
2695                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2696                                                   &hdev->dev_flags);
2697
2698         if ((scan & SCAN_INQUIRY)) {
2699                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2700                                                    &hdev->dev_flags);
2701         } else {
2702                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2703                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2704                                                     &hdev->dev_flags);
2705         }
2706
2707         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2708                 return;
2709
2710         if (conn_changed || discov_changed) {
2711                 /* In case this was disabled through mgmt */
2712                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2713
2714                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2715                         mgmt_update_adv_data(hdev);
2716
2717                 mgmt_new_settings(hdev);
2718         }
2719 }
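
/* Userspace-side sketch (hedged example assuming BlueZ's
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h>; not part of this
 * file). This helper runs after the HCISETSCAN ioctl handled below,
 * e.g. the equivalent of "hciconfig hci0 piscan":
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ctl >= 0 && ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */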
2720
2721 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2722 {
2723         struct hci_dev *hdev;
2724         struct hci_dev_req dr;
2725         int err = 0;
2726
2727         if (copy_from_user(&dr, arg, sizeof(dr)))
2728                 return -EFAULT;
2729
2730         hdev = hci_dev_get(dr.dev_id);
2731         if (!hdev)
2732                 return -ENODEV;
2733
2734         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2735                 err = -EBUSY;
2736                 goto done;
2737         }
2738
2739         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2740                 err = -EOPNOTSUPP;
2741                 goto done;
2742         }
2743
2744         if (hdev->dev_type != HCI_BREDR) {
2745                 err = -EOPNOTSUPP;
2746                 goto done;
2747         }
2748
2749         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2750                 err = -EOPNOTSUPP;
2751                 goto done;
2752         }
2753
2754         switch (cmd) {
2755         case HCISETAUTH:
2756                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2757                                    HCI_INIT_TIMEOUT);
2758                 break;
2759
2760         case HCISETENCRYPT:
2761                 if (!lmp_encrypt_capable(hdev)) {
2762                         err = -EOPNOTSUPP;
2763                         break;
2764                 }
2765
2766                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2767                         /* Auth must be enabled first */
2768                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2769                                            HCI_INIT_TIMEOUT);
2770                         if (err)
2771                                 break;
2772                 }
2773
2774                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2775                                    HCI_INIT_TIMEOUT);
2776                 break;
2777
2778         case HCISETSCAN:
2779                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2780                                    HCI_INIT_TIMEOUT);
2781
2782                 /* Ensure that the connectable and discoverable states
2783                  * get correctly modified as this was a non-mgmt change.
2784                  */
2785                 if (!err)
2786                         hci_update_scan_state(hdev, dr.dev_opt);
2787                 break;
2788
2789         case HCISETLINKPOL:
2790                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2791                                    HCI_INIT_TIMEOUT);
2792                 break;
2793
2794         case HCISETLINKMODE:
2795                 hdev->link_mode = ((__u16) dr.dev_opt) &
2796                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2797                 break;
2798
2799         case HCISETPTYPE:
2800                 hdev->pkt_type = (__u16) dr.dev_opt;
2801                 break;
2802
2803         case HCISETACLMTU:
2804                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2805                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2806                 break;
2807
2808         case HCISETSCOMTU:
2809                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2810                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2811                 break;
2812
2813         default:
2814                 err = -EINVAL;
2815                 break;
2816         }
2817
2818 done:
2819         hci_dev_put(hdev);
2820         return err;
2821 }
2822
2823 int hci_get_dev_list(void __user *arg)
2824 {
2825         struct hci_dev *hdev;
2826         struct hci_dev_list_req *dl;
2827         struct hci_dev_req *dr;
2828         int n = 0, size, err;
2829         __u16 dev_num;
2830
2831         if (get_user(dev_num, (__u16 __user *) arg))
2832                 return -EFAULT;
2833
2834         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2835                 return -EINVAL;
2836
2837         size = sizeof(*dl) + dev_num * sizeof(*dr);
2838
2839         dl = kzalloc(size, GFP_KERNEL);
2840         if (!dl)
2841                 return -ENOMEM;
2842
2843         dr = dl->dev_req;
2844
2845         read_lock(&hci_dev_list_lock);
2846         list_for_each_entry(hdev, &hci_dev_list, list) {
2847                 unsigned long flags = hdev->flags;
2848
2849                 /* When auto-off is configured, it means the transport
2850                  * is running, but in that case still indicate that the
2851                  * device is actually down.
2852                  */
2853                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2854                         flags &= ~BIT(HCI_UP);
2855
2856                 (dr + n)->dev_id  = hdev->id;
2857                 (dr + n)->dev_opt = flags;
2858
2859                 if (++n >= dev_num)
2860                         break;
2861         }
2862         read_unlock(&hci_dev_list_lock);
2863
2864         dl->dev_num = n;
2865         size = sizeof(*dl) + n * sizeof(*dr);
2866
2867         err = copy_to_user(arg, dl, size);
2868         kfree(dl);
2869
2870         return err ? -EFAULT : 0;
2871 }
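
/* Userspace-side sketch (hedged example assuming BlueZ's
 * <bluetooth/hci.h>; not part of this file) of driving the
 * HCIGETDEVLIST ioctl serviced above:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ctl >= 0 && ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", (dr + i)->dev_id);
 */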
2872
2873 int hci_get_dev_info(void __user *arg)
2874 {
2875         struct hci_dev *hdev;
2876         struct hci_dev_info di;
2877         unsigned long flags;
2878         int err = 0;
2879
2880         if (copy_from_user(&di, arg, sizeof(di)))
2881                 return -EFAULT;
2882
2883         hdev = hci_dev_get(di.dev_id);
2884         if (!hdev)
2885                 return -ENODEV;
2886
2887         /* When auto-off is configured, it means the transport
2888          * is running, but in that case still indicate that the
2889          * device is actually down.
2890          */
2891         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2892                 flags = hdev->flags & ~BIT(HCI_UP);
2893         else
2894                 flags = hdev->flags;
2895
2896         strcpy(di.name, hdev->name);
2897         di.bdaddr   = hdev->bdaddr;
2898         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2899         di.flags    = flags;
2900         di.pkt_type = hdev->pkt_type;
2901         if (lmp_bredr_capable(hdev)) {
2902                 di.acl_mtu  = hdev->acl_mtu;
2903                 di.acl_pkts = hdev->acl_pkts;
2904                 di.sco_mtu  = hdev->sco_mtu;
2905                 di.sco_pkts = hdev->sco_pkts;
2906         } else {
2907                 di.acl_mtu  = hdev->le_mtu;
2908                 di.acl_pkts = hdev->le_pkts;
2909                 di.sco_mtu  = 0;
2910                 di.sco_pkts = 0;
2911         }
2912         di.link_policy = hdev->link_policy;
2913         di.link_mode   = hdev->link_mode;
2914
2915         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2916         memcpy(&di.features, &hdev->features, sizeof(di.features));
2917
2918         if (copy_to_user(arg, &di, sizeof(di)))
2919                 err = -EFAULT;
2920
2921         hci_dev_put(hdev);
2922
2923         return err;
2924 }
2925
2926 /* ---- Interface to HCI drivers ---- */
2927
2928 static int hci_rfkill_set_block(void *data, bool blocked)
2929 {
2930         struct hci_dev *hdev = data;
2931
2932         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2933
2934         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2935                 return -EBUSY;
2936
2937         if (blocked) {
2938                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2939                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2940                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2941                         hci_dev_do_close(hdev);
2942         } else {
2943                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2944         }
2945
2946         return 0;
2947 }
2948
2949 static const struct rfkill_ops hci_rfkill_ops = {
2950         .set_block = hci_rfkill_set_block,
2951 };
2952
2953 static void hci_power_on(struct work_struct *work)
2954 {
2955         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2956         int err;
2957
2958         BT_DBG("%s", hdev->name);
2959
2960         err = hci_dev_do_open(hdev);
2961         if (err < 0) {
2962                 mgmt_set_powered_failed(hdev, err);
2963                 return;
2964         }
2965
2966         /* During the HCI setup phase, a few error conditions are
2967          * ignored and they need to be checked now. If they are still
2968          * valid, it is important to turn the device back off.
2969          */
2970         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2971             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2972             (hdev->dev_type == HCI_BREDR &&
2973              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2974              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2975                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2976                 hci_dev_do_close(hdev);
2977         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2978                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2979                                    HCI_AUTO_OFF_TIMEOUT);
2980         }
2981
2982         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2983                 /* For unconfigured devices, set the HCI_RAW flag
2984                  * so that userspace can easily identify them.
2985                  */
2986                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2987                         set_bit(HCI_RAW, &hdev->flags);
2988
2989                 /* For fully configured devices, this will send
2990                  * the Index Added event. For unconfigured devices,
2991                  * it will send the Unconfigured Index Added event.
2992                  *
2993                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2994                  * and no event will be sent.
2995                  */
2996                 mgmt_index_added(hdev);
2997         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2998                 /* Once the controller is configured, it is important
2999                  * to clear the HCI_RAW flag.
3000                  */
3001                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3002                         clear_bit(HCI_RAW, &hdev->flags);
3003
3004                 /* Powering on the controller with HCI_CONFIG set only
3005                  * happens with the transition from unconfigured to
3006                  * configured. This will send the Index Added event.
3007                  */
3008                 mgmt_index_added(hdev);
3009         }
3010 }
3011
3012 static void hci_power_off(struct work_struct *work)
3013 {
3014         struct hci_dev *hdev = container_of(work, struct hci_dev,
3015                                             power_off.work);
3016
3017         BT_DBG("%s", hdev->name);
3018
3019         hci_dev_do_close(hdev);
3020 }
3021
3022 static void hci_discov_off(struct work_struct *work)
3023 {
3024         struct hci_dev *hdev;
3025
3026         hdev = container_of(work, struct hci_dev, discov_off.work);
3027
3028         BT_DBG("%s", hdev->name);
3029
3030         mgmt_discoverable_timeout(hdev);
3031 }
3032
3033 void hci_uuids_clear(struct hci_dev *hdev)
3034 {
3035         struct bt_uuid *uuid, *tmp;
3036
3037         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3038                 list_del(&uuid->list);
3039                 kfree(uuid);
3040         }
3041 }
3042
3043 void hci_link_keys_clear(struct hci_dev *hdev)
3044 {
3045         struct list_head *p, *n;
3046
3047         list_for_each_safe(p, n, &hdev->link_keys) {
3048                 struct link_key *key;
3049
3050                 key = list_entry(p, struct link_key, list);
3051
3052                 list_del(p);
3053                 kfree(key);
3054         }
3055 }
3056
3057 void hci_smp_ltks_clear(struct hci_dev *hdev)
3058 {
3059         struct smp_ltk *k, *tmp;
3060
3061         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3062                 list_del(&k->list);
3063                 kfree(k);
3064         }
3065 }
3066
3067 void hci_smp_irks_clear(struct hci_dev *hdev)
3068 {
3069         struct smp_irk *k, *tmp;
3070
3071         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3072                 list_del(&k->list);
3073                 kfree(k);
3074         }
3075 }
3076
3077 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3078 {
3079         struct link_key *k;
3080
3081         list_for_each_entry(k, &hdev->link_keys, list)
3082                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3083                         return k;
3084
3085         return NULL;
3086 }
3087
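/* Annotation (core specification background, not original to this
 * file): auth_type and remote_auth carry the IO capability exchange's
 * Authentication_Requirements values: 0x00/0x01 mean no bonding,
 * 0x02/0x03 dedicated bonding and 0x04/0x05 general bonding, where
 * the odd values additionally require MITM protection. That is why
 * the checks below treat values above 0x01 as "bonding requested".
 */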
3088 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3089                                u8 key_type, u8 old_key_type)
3090 {
3091         /* Legacy key */
3092         if (key_type < 0x03)
3093                 return true;
3094
3095         /* Debug keys are insecure so don't store them persistently */
3096         if (key_type == HCI_LK_DEBUG_COMBINATION)
3097                 return false;
3098
3099         /* Changed combination key and there's no previous one */
3100         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3101                 return false;
3102
3103         /* Security mode 3 case */
3104         if (!conn)
3105                 return true;
3106
3107         /* Neither the local nor the remote side requested no-bonding */
3108         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3109                 return true;
3110
3111         /* Local side had dedicated bonding as requirement */
3112         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3113                 return true;
3114
3115         /* Remote side had dedicated bonding as requirement */
3116         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3117                 return true;
3118
3119         /* If none of the above criteria match, then don't store the key
3120          * persistently */
3121         return false;
3122 }
3123
3124 static bool ltk_type_master(u8 type)
3125 {
3126         return (type == SMP_LTK);
3127 }
3128
3129 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3130                              bool master)
3131 {
3132         struct smp_ltk *k;
3133
3134         list_for_each_entry(k, &hdev->long_term_keys, list) {
3135                 if (k->ediv != ediv || k->rand != rand)
3136                         continue;
3137
3138                 if (ltk_type_master(k->type) != master)
3139                         continue;
3140
3141                 return k;
3142         }
3143
3144         return NULL;
3145 }
3146
3147 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3148                                      u8 addr_type, bool master)
3149 {
3150         struct smp_ltk *k;
3151
3152         list_for_each_entry(k, &hdev->long_term_keys, list)
3153                 if (addr_type == k->bdaddr_type &&
3154                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3155                     ltk_type_master(k->type) == master)
3156                         return k;
3157
3158         return NULL;
3159 }
3160
3161 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3162 {
3163         struct smp_irk *irk;
3164
3165         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3166                 if (!bacmp(&irk->rpa, rpa))
3167                         return irk;
3168         }
3169
3170         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3171                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3172                         bacpy(&irk->rpa, rpa);
3173                         return irk;
3174                 }
3175         }
3176
3177         return NULL;
3178 }
3179
3180 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3181                                      u8 addr_type)
3182 {
3183         struct smp_irk *irk;
3184
3185         /* Identity Address must be public or static random */
3186         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3187                 return NULL;
3188
3189         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3190                 if (addr_type == irk->addr_type &&
3191                     bacmp(bdaddr, &irk->bdaddr) == 0)
3192                         return irk;
3193         }
3194
3195         return NULL;
3196 }
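
/* Worked example for the identity address check above (annotation,
 * not original to this file): bdaddr_t is stored little-endian, so
 * b[5] holds the most significant octet of the printed address. A
 * static random address must have its two top bits set, i.e. look
 * like C0:xx:xx:xx:xx:xx through FF:xx:xx:xx:xx:xx. Resolvable (01)
 * and non-resolvable (00) private addresses do not satisfy
 * (b[5] & 0xc0) == 0xc0 and are rejected as identity addresses.
 */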
3197
3198 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3199                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3200                                   u8 pin_len, bool *persistent)
3201 {
3202         struct link_key *key, *old_key;
3203         u8 old_key_type;
3204
3205         old_key = hci_find_link_key(hdev, bdaddr);
3206         if (old_key) {
3207                 old_key_type = old_key->type;
3208                 key = old_key;
3209         } else {
3210                 old_key_type = conn ? conn->key_type : 0xff;
3211                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3212                 if (!key)
3213                         return NULL;
3214                 list_add(&key->list, &hdev->link_keys);
3215         }
3216
3217         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3218
3219         /* Some buggy controller combinations generate a changed
3220          * combination key for legacy pairing even when there's no
3221          * previous key */
3222         if (type == HCI_LK_CHANGED_COMBINATION &&
3223             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3224                 type = HCI_LK_COMBINATION;
3225                 if (conn)
3226                         conn->key_type = type;
3227         }
3228
3229         bacpy(&key->bdaddr, bdaddr);
3230         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3231         key->pin_len = pin_len;
3232
3233         if (type == HCI_LK_CHANGED_COMBINATION)
3234                 key->type = old_key_type;
3235         else
3236                 key->type = type;
3237
3238         if (persistent)
3239                 *persistent = hci_persistent_key(hdev, conn, type,
3240                                                  old_key_type);
3241
3242         return key;
3243 }
3244
3245 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3246                             u8 addr_type, u8 type, u8 authenticated,
3247                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3248 {
3249         struct smp_ltk *key, *old_key;
3250         bool master = ltk_type_master(type);
3251
3252         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3253         if (old_key)
3254                 key = old_key;
3255         else {
3256                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3257                 if (!key)
3258                         return NULL;
3259                 list_add(&key->list, &hdev->long_term_keys);
3260         }
3261
3262         bacpy(&key->bdaddr, bdaddr);
3263         key->bdaddr_type = addr_type;
3264         memcpy(key->val, tk, sizeof(key->val));
3265         key->authenticated = authenticated;
3266         key->ediv = ediv;
3267         key->rand = rand;
3268         key->enc_size = enc_size;
3269         key->type = type;
3270
3271         return key;
3272 }
3273
3274 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3275                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3276 {
3277         struct smp_irk *irk;
3278
3279         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3280         if (!irk) {
3281                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3282                 if (!irk)
3283                         return NULL;
3284
3285                 bacpy(&irk->bdaddr, bdaddr);
3286                 irk->addr_type = addr_type;
3287
3288                 list_add(&irk->list, &hdev->identity_resolving_keys);
3289         }
3290
3291         memcpy(irk->val, val, 16);
3292         bacpy(&irk->rpa, rpa);
3293
3294         return irk;
3295 }
3296
3297 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3298 {
3299         struct link_key *key;
3300
3301         key = hci_find_link_key(hdev, bdaddr);
3302         if (!key)
3303                 return -ENOENT;
3304
3305         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3306
3307         list_del(&key->list);
3308         kfree(key);
3309
3310         return 0;
3311 }
3312
3313 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3314 {
3315         struct smp_ltk *k, *tmp;
3316         int removed = 0;
3317
3318         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3319                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3320                         continue;
3321
3322                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3323
3324                 list_del(&k->list);
3325                 kfree(k);
3326                 removed++;
3327         }
3328
3329         return removed ? 0 : -ENOENT;
3330 }
3331
3332 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3333 {
3334         struct smp_irk *k, *tmp;
3335
3336         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3337                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3338                         continue;
3339
3340                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3341
3342                 list_del(&k->list);
3343                 kfree(k);
3344         }
3345 }
3346
3347 /* HCI command timer function */
3348 static void hci_cmd_timeout(struct work_struct *work)
3349 {
3350         struct hci_dev *hdev = container_of(work, struct hci_dev,
3351                                             cmd_timer.work);
3352
3353         if (hdev->sent_cmd) {
3354                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3355                 u16 opcode = __le16_to_cpu(sent->opcode);
3356
3357                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3358         } else {
3359                 BT_ERR("%s command tx timeout", hdev->name);
3360         }
3361
3362         atomic_set(&hdev->cmd_cnt, 1);
3363         queue_work(hdev->workqueue, &hdev->cmd_work);
3364 }
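
/* The timer above is armed each time a command is handed to the driver
 * and cancelled when the controller answers with Command Complete or
 * Command Status. On expiry the command credit is forced back to 1 so
 * that hci_cmd_work can send the next queued command even though the
 * stuck one was never acknowledged.
 */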
3365
3366 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3367                                           bdaddr_t *bdaddr)
3368 {
3369         struct oob_data *data;
3370
3371         list_for_each_entry(data, &hdev->remote_oob_data, list)
3372                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3373                         return data;
3374
3375         return NULL;
3376 }
3377
3378 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3379 {
3380         struct oob_data *data;
3381
3382         data = hci_find_remote_oob_data(hdev, bdaddr);
3383         if (!data)
3384                 return -ENOENT;
3385
3386         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3387
3388         list_del(&data->list);
3389         kfree(data);
3390
3391         return 0;
3392 }
3393
3394 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3395 {
3396         struct oob_data *data, *n;
3397
3398         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3399                 list_del(&data->list);
3400                 kfree(data);
3401         }
3402 }
3403
3404 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3405                             u8 *hash, u8 *randomizer)
3406 {
3407         struct oob_data *data;
3408
3409         data = hci_find_remote_oob_data(hdev, bdaddr);
3410         if (!data) {
3411                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3412                 if (!data)
3413                         return -ENOMEM;
3414
3415                 bacpy(&data->bdaddr, bdaddr);
3416                 list_add(&data->list, &hdev->remote_oob_data);
3417         }
3418
3419         memcpy(data->hash192, hash, sizeof(data->hash192));
3420         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3421
3422         memset(data->hash256, 0, sizeof(data->hash256));
3423         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3424
3425         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3426
3427         return 0;
3428 }
3429
3430 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3431                                 u8 *hash192, u8 *randomizer192,
3432                                 u8 *hash256, u8 *randomizer256)
3433 {
3434         struct oob_data *data;
3435
3436         data = hci_find_remote_oob_data(hdev, bdaddr);
3437         if (!data) {
3438                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3439                 if (!data)
3440                         return -ENOMEM;
3441
3442                 bacpy(&data->bdaddr, bdaddr);
3443                 list_add(&data->list, &hdev->remote_oob_data);
3444         }
3445
3446         memcpy(data->hash192, hash192, sizeof(data->hash192));
3447         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3448
3449         memcpy(data->hash256, hash256, sizeof(data->hash256));
3450         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3451
3452         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3453
3454         return 0;
3455 }
3456
3457 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3458                                          bdaddr_t *bdaddr, u8 type)
3459 {
3460         struct bdaddr_list *b;
3461
3462         list_for_each_entry(b, bdaddr_list, list) {
3463                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3464                         return b;
3465         }
3466
3467         return NULL;
3468 }
3469
3470 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3471 {
3472         struct list_head *p, *n;
3473
3474         list_for_each_safe(p, n, bdaddr_list) {
3475                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3476
3477                 list_del(p);
3478                 kfree(b);
3479         }
3480 }
3481
3482 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3483 {
3484         struct bdaddr_list *entry;
3485
3486         if (!bacmp(bdaddr, BDADDR_ANY))
3487                 return -EBADF;
3488
3489         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3490                 return -EEXIST;
3491
3492         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3493         if (!entry)
3494                 return -ENOMEM;
3495
3496         bacpy(&entry->bdaddr, bdaddr);
3497         entry->bdaddr_type = type;
3498
3499         list_add(&entry->list, list);
3500
3501         return 0;
3502 }
3503
3504 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3505 {
3506         struct bdaddr_list *entry;
3507
3508         if (!bacmp(bdaddr, BDADDR_ANY)) {
3509                 hci_bdaddr_list_clear(list);
3510                 return 0;
3511         }
3512
3513         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3514         if (!entry)
3515                 return -ENOENT;
3516
3517         list_del(&entry->list);
3518         kfree(entry);
3519
3520         return 0;
3521 }
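
/* Illustrative usage (sketch only, not part of this file): the same
 * three helpers back the BR/EDR blacklist and whitelist as well as the
 * LE white list, keyed by address plus address type, and deleting
 * BDADDR_ANY flushes an entire list:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		; (entry was already present)
 *
 *	hci_bdaddr_list_del(&hdev->whitelist, BDADDR_ANY, 0); (clear all)
 */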
3522
3523 /* This function requires the caller holds hdev->lock */
3524 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3525                                                bdaddr_t *addr, u8 addr_type)
3526 {
3527         struct hci_conn_params *params;
3528
3529         /* The conn params list only contains identity addresses */
3530         if (!hci_is_identity_address(addr, addr_type))
3531                 return NULL;
3532
3533         list_for_each_entry(params, &hdev->le_conn_params, list) {
3534                 if (bacmp(&params->addr, addr) == 0 &&
3535                     params->addr_type == addr_type) {
3536                         return params;
3537                 }
3538         }
3539
3540         return NULL;
3541 }
3542
3543 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3544 {
3545         struct hci_conn *conn;
3546
3547         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3548         if (!conn)
3549                 return false;
3550
3551         if (conn->dst_type != type)
3552                 return false;
3553
3554         if (conn->state != BT_CONNECTED)
3555                 return false;
3556
3557         return true;
3558 }
3559
3560 /* This function requires the caller holds hdev->lock */
3561 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3562                                                   bdaddr_t *addr, u8 addr_type)
3563 {
3564         struct hci_conn_params *param;
3565
3566         /* The list only contains identity addresses */
3567         if (!hci_is_identity_address(addr, addr_type))
3568                 return NULL;
3569
3570         list_for_each_entry(param, list, action) {
3571                 if (bacmp(&param->addr, addr) == 0 &&
3572                     param->addr_type == addr_type)
3573                         return param;
3574         }
3575
3576         return NULL;
3577 }
3578
3579 /* This function requires the caller holds hdev->lock */
3580 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3581                                             bdaddr_t *addr, u8 addr_type)
3582 {
3583         struct hci_conn_params *params;
3584
3585         if (!hci_is_identity_address(addr, addr_type))
3586                 return NULL;
3587
3588         params = hci_conn_params_lookup(hdev, addr, addr_type);
3589         if (params)
3590                 return params;
3591
3592         params = kzalloc(sizeof(*params), GFP_KERNEL);
3593         if (!params) {
3594                 BT_ERR("Out of memory");
3595                 return NULL;
3596         }
3597
3598         bacpy(&params->addr, addr);
3599         params->addr_type = addr_type;
3600
3601         list_add(&params->list, &hdev->le_conn_params);
3602         INIT_LIST_HEAD(&params->action);
3603
3604         params->conn_min_interval = hdev->le_conn_min_interval;
3605         params->conn_max_interval = hdev->le_conn_max_interval;
3606         params->conn_latency = hdev->le_conn_latency;
3607         params->supervision_timeout = hdev->le_supv_timeout;
3608         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3609
3610         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3611
3612         return params;
3613 }
3614
3615 /* This function requires the caller holds hdev->lock */
3616 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3617                         u8 auto_connect)
3618 {
3619         struct hci_conn_params *params;
3620
3621         params = hci_conn_params_add(hdev, addr, addr_type);
3622         if (!params)
3623                 return -EIO;
3624
3625         if (params->auto_connect == auto_connect)
3626                 return 0;
3627
3628         list_del_init(&params->action);
3629
3630         switch (auto_connect) {
3631         case HCI_AUTO_CONN_DISABLED:
3632         case HCI_AUTO_CONN_LINK_LOSS:
3633                 hci_update_background_scan(hdev);
3634                 break;
3635         case HCI_AUTO_CONN_REPORT:
3636                 list_add(&params->action, &hdev->pend_le_reports);
3637                 hci_update_background_scan(hdev);
3638                 break;
3639         case HCI_AUTO_CONN_ALWAYS:
3640                 if (!is_connected(hdev, addr, addr_type)) {
3641                         list_add(&params->action, &hdev->pend_le_conns);
3642                         hci_update_background_scan(hdev);
3643                 }
3644                 break;
3645         }
3646
3647         params->auto_connect = auto_connect;
3648
3649         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3650                auto_connect);
3651
3652         return 0;
3653 }
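
/* The auto_connect policies map onto the two pending-action lists:
 * HCI_AUTO_CONN_REPORT parks the entry on pend_le_reports so that
 * advertising reports are forwarded without connecting, while
 * HCI_AUTO_CONN_ALWAYS parks it on pend_le_conns so that passive
 * scanning triggers a connection attempt. DISABLED and LINK_LOSS keep
 * the entry off both lists (for LINK_LOSS a reconnection is only set
 * up after an actual link loss). Every transition re-runs the
 * background scan logic, since scanning is only needed while at least
 * one entry is pending.
 */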
3654
3655 /* This function requires the caller holds hdev->lock */
3656 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3657 {
3658         struct hci_conn_params *params;
3659
3660         params = hci_conn_params_lookup(hdev, addr, addr_type);
3661         if (!params)
3662                 return;
3663
3664         list_del(&params->action);
3665         list_del(&params->list);
3666         kfree(params);
3667
3668         hci_update_background_scan(hdev);
3669
3670         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3671 }
3672
3673 /* This function requires the caller holds hdev->lock */
3674 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3675 {
3676         struct hci_conn_params *params, *tmp;
3677
3678         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3679                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3680                         continue;
3681                 list_del(&params->list);
3682                 kfree(params);
3683         }
3684
3685         BT_DBG("All LE disabled connection parameters were removed");
3686 }
3687
3688 /* This function requires the caller holds hdev->lock */
3689 void hci_conn_params_clear_all(struct hci_dev *hdev)
3690 {
3691         struct hci_conn_params *params, *tmp;
3692
3693         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3694                 list_del(&params->action);
3695                 list_del(&params->list);
3696                 kfree(params);
3697         }
3698
3699         hci_update_background_scan(hdev);
3700
3701         BT_DBG("All LE connection parameters were removed");
3702 }
3703
3704 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3705 {
3706         if (status) {
3707                 BT_ERR("Failed to start inquiry: status %d", status);
3708
3709                 hci_dev_lock(hdev);
3710                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3711                 hci_dev_unlock(hdev);
3712                 return;
3713         }
3714 }
3715
3716 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3717 {
3718         /* General inquiry access code (GIAC) */
3719         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3720         struct hci_request req;
3721         struct hci_cp_inquiry cp;
3722         int err;
3723
3724         if (status) {
3725                 BT_ERR("Failed to disable LE scanning: status %d", status);
3726                 return;
3727         }
3728
3729         switch (hdev->discovery.type) {
3730         case DISCOV_TYPE_LE:
3731                 hci_dev_lock(hdev);
3732                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3733                 hci_dev_unlock(hdev);
3734                 break;
3735
3736         case DISCOV_TYPE_INTERLEAVED:
3737                 hci_req_init(&req, hdev);
3738
3739                 memset(&cp, 0, sizeof(cp));
3740                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3741                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3742                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3743
3744                 hci_dev_lock(hdev);
3745
3746                 hci_inquiry_cache_flush(hdev);
3747
3748                 err = hci_req_run(&req, inquiry_complete);
3749                 if (err) {
3750                         BT_ERR("Inquiry request failed: err %d", err);
3751                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3752                 }
3753
3754                 hci_dev_unlock(hdev);
3755                 break;
3756         }
3757 }
3758
3759 static void le_scan_disable_work(struct work_struct *work)
3760 {
3761         struct hci_dev *hdev = container_of(work, struct hci_dev,
3762                                             le_scan_disable.work);
3763         struct hci_request req;
3764         int err;
3765
3766         BT_DBG("%s", hdev->name);
3767
3768         hci_req_init(&req, hdev);
3769
3770         hci_req_add_le_scan_disable(&req);
3771
3772         err = hci_req_run(&req, le_scan_disable_work_complete);
3773         if (err)
3774                 BT_ERR("Disable LE scanning request failed: err %d", err);
3775 }
3776
3777 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3778 {
3779         struct hci_dev *hdev = req->hdev;
3780
3781         /* If we're advertising or initiating an LE connection we can't
3782          * go ahead and change the random address at this time. This is
3783          * because the eventual initiator address used for the
3784          * subsequently created connection will be undefined (some
3785          * controllers use the new address and others the one we had
3786          * when the operation started).
3787          *
3788          * In this kind of scenario skip the update and let the random
3789          * address be updated at the next cycle.
3790          */
3791         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3792             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3793                 BT_DBG("Deferring random address update");
3794                 return;
3795         }
3796
3797         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3798 }
3799
3800 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3801                               u8 *own_addr_type)
3802 {
3803         struct hci_dev *hdev = req->hdev;
3804         int err;
3805
3806         /* If privacy is enabled use a resolvable private address. If
3807          * the current RPA has expired or something other than the
3808          * current RPA is in use, then generate a new one.
3809          */
3810         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3811                 int to;
3812
3813                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3814
3815                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3816                     !bacmp(&hdev->random_addr, &hdev->rpa))
3817                         return 0;
3818
3819                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3820                 if (err < 0) {
3821                         BT_ERR("%s failed to generate new RPA", hdev->name);
3822                         return err;
3823                 }
3824
3825                 set_random_addr(req, &hdev->rpa);
3826
3827                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3828                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3829
3830                 return 0;
3831         }
3832
3833         /* If privacy is required but no resolvable private address is
3834          * available, use a non-resolvable private address. This is
3835          * useful for active scanning and non-connectable advertising.
3836          */
3837         if (require_privacy) {
3838                 bdaddr_t urpa;
3839
3840                 get_random_bytes(&urpa, 6);
3841                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3842
3843                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3844                 set_random_addr(req, &urpa);
3845                 return 0;
3846         }
3847
3848         /* If forcing the static address is in use or there is no public
3849          * address, use the static address as the random address (but skip
3850          * the HCI command if the current random address is already the
3851          * static one).
3852          */
3853         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3854             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3855                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3856                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3857                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3858                                     &hdev->static_addr);
3859                 return 0;
3860         }
3861
3862         /* Neither privacy nor static address is being used so use a
3863          * public address.
3864          */
3865         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3866
3867         return 0;
3868 }
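
/* Summarised, own_addr_type is chosen in strict priority order:
 *
 *	1. HCI_PRIVACY set          -> random, resolvable private address
 *	2. require_privacy          -> random, non-resolvable private
 *	3. forced or implied static -> random, static address
 *	4. otherwise                -> public address
 */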
3869
3870 /* Copy the Identity Address of the controller.
3871  *
3872  * If the controller has a public BD_ADDR, then by default use that one.
3873  * If this is an LE-only controller without a public address, default to
3874  * the static random address.
3875  *
3876  * For debugging purposes it is possible to force controllers with a
3877  * public address to use the static random address instead.
3878  */
3879 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3880                                u8 *bdaddr_type)
3881 {
3882         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3883             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3884                 bacpy(bdaddr, &hdev->static_addr);
3885                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3886         } else {
3887                 bacpy(bdaddr, &hdev->bdaddr);
3888                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3889         }
3890 }
3891
3892 /* Alloc HCI device */
3893 struct hci_dev *hci_alloc_dev(void)
3894 {
3895         struct hci_dev *hdev;
3896
3897         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3898         if (!hdev)
3899                 return NULL;
3900
3901         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3902         hdev->esco_type = (ESCO_HV1);
3903         hdev->link_mode = (HCI_LM_ACCEPT);
3904         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3905         hdev->io_capability = 0x03;     /* No Input No Output */
3906         hdev->manufacturer = 0xffff;    /* Default to internal use */
3907         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3908         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3909
3910         hdev->sniff_max_interval = 800;
3911         hdev->sniff_min_interval = 80;
3912
3913         hdev->le_adv_channel_map = 0x07;
3914         hdev->le_scan_interval = 0x0060;
3915         hdev->le_scan_window = 0x0030;
3916         hdev->le_conn_min_interval = 0x0028;
3917         hdev->le_conn_max_interval = 0x0038;
3918         hdev->le_conn_latency = 0x0000;
3919         hdev->le_supv_timeout = 0x002a;
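
        /* In standard Bluetooth units the defaults above come out as:
         * sniff interval 80-800 slots of 0.625 ms (50-500 ms), LE scan
         * interval 0x0060 * 0.625 ms = 60 ms with a 0x0030 * 0.625 ms =
         * 30 ms window, connection interval 0x0028-0x0038 * 1.25 ms =
         * 50-70 ms and a supervision timeout of 0x002a * 10 ms = 420 ms.
         */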
3920
3921         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3922         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3923         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3924         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3925
3926         mutex_init(&hdev->lock);
3927         mutex_init(&hdev->req_lock);
3928
3929         INIT_LIST_HEAD(&hdev->mgmt_pending);
3930         INIT_LIST_HEAD(&hdev->blacklist);
3931         INIT_LIST_HEAD(&hdev->whitelist);
3932         INIT_LIST_HEAD(&hdev->uuids);
3933         INIT_LIST_HEAD(&hdev->link_keys);
3934         INIT_LIST_HEAD(&hdev->long_term_keys);
3935         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3936         INIT_LIST_HEAD(&hdev->remote_oob_data);
3937         INIT_LIST_HEAD(&hdev->le_white_list);
3938         INIT_LIST_HEAD(&hdev->le_conn_params);
3939         INIT_LIST_HEAD(&hdev->pend_le_conns);
3940         INIT_LIST_HEAD(&hdev->pend_le_reports);
3941         INIT_LIST_HEAD(&hdev->conn_hash.list);
3942
3943         INIT_WORK(&hdev->rx_work, hci_rx_work);
3944         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3945         INIT_WORK(&hdev->tx_work, hci_tx_work);
3946         INIT_WORK(&hdev->power_on, hci_power_on);
3947
3948         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3949         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3950         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3951
3952         skb_queue_head_init(&hdev->rx_q);
3953         skb_queue_head_init(&hdev->cmd_q);
3954         skb_queue_head_init(&hdev->raw_q);
3955
3956         init_waitqueue_head(&hdev->req_wait_q);
3957
3958         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3959
3960         hci_init_sysfs(hdev);
3961         discovery_init(hdev);
3962
3963         return hdev;
3964 }
3965 EXPORT_SYMBOL(hci_alloc_dev);
3966
3967 /* Free HCI device */
3968 void hci_free_dev(struct hci_dev *hdev)
3969 {
3970         /* will free via device release */
3971         put_device(&hdev->dev);
3972 }
3973 EXPORT_SYMBOL(hci_free_dev);
3974
3975 /* Register HCI device */
3976 int hci_register_dev(struct hci_dev *hdev)
3977 {
3978         int id, error;
3979
3980         if (!hdev->open || !hdev->close || !hdev->send)
3981                 return -EINVAL;
3982
3983         /* Do not allow HCI_AMP devices to register at index 0,
3984          * so the index can be used as the AMP controller ID.
3985          */
3986         switch (hdev->dev_type) {
3987         case HCI_BREDR:
3988                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3989                 break;
3990         case HCI_AMP:
3991                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3992                 break;
3993         default:
3994                 return -EINVAL;
3995         }
3996
3997         if (id < 0)
3998                 return id;
3999
4000         sprintf(hdev->name, "hci%d", id);
4001         hdev->id = id;
4002
4003         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4004
4005         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4006                                           WQ_MEM_RECLAIM, 1, hdev->name);
4007         if (!hdev->workqueue) {
4008                 error = -ENOMEM;
4009                 goto err;
4010         }
4011
4012         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4013                                               WQ_MEM_RECLAIM, 1, hdev->name);
4014         if (!hdev->req_workqueue) {
4015                 destroy_workqueue(hdev->workqueue);
4016                 error = -ENOMEM;
4017                 goto err;
4018         }
4019
4020         if (!IS_ERR_OR_NULL(bt_debugfs))
4021                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4022
4023         dev_set_name(&hdev->dev, "%s", hdev->name);
4024
4025         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4026                                                CRYPTO_ALG_ASYNC);
4027         if (IS_ERR(hdev->tfm_aes)) {
4028                 BT_ERR("Unable to create crypto context");
4029                 error = PTR_ERR(hdev->tfm_aes);
4030                 hdev->tfm_aes = NULL;
4031                 goto err_wqueue;
4032         }
4033
4034         error = device_add(&hdev->dev);
4035         if (error < 0)
4036                 goto err_tfm;
4037
4038         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4039                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4040                                     hdev);
4041         if (hdev->rfkill) {
4042                 if (rfkill_register(hdev->rfkill) < 0) {
4043                         rfkill_destroy(hdev->rfkill);
4044                         hdev->rfkill = NULL;
4045                 }
4046         }
4047
4048         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4049                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4050
4051         set_bit(HCI_SETUP, &hdev->dev_flags);
4052         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4053
4054         if (hdev->dev_type == HCI_BREDR) {
4055                 /* Assume BR/EDR support until proven otherwise (such as
4056                  * through reading supported features during init).
4057                  */
4058                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4059         }
4060
4061         write_lock(&hci_dev_list_lock);
4062         list_add(&hdev->list, &hci_dev_list);
4063         write_unlock(&hci_dev_list_lock);
4064
4065         /* Devices that are marked for raw-only usage are unconfigured
4066          * and should not be included in normal operation.
4067          */
4068         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4069                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4070
4071         hci_notify(hdev, HCI_DEV_REG);
4072         hci_dev_hold(hdev);
4073
4074         queue_work(hdev->req_workqueue, &hdev->power_on);
4075
4076         return id;
4077
4078 err_tfm:
4079         crypto_free_blkcipher(hdev->tfm_aes);
4080 err_wqueue:
4081         destroy_workqueue(hdev->workqueue);
4082         destroy_workqueue(hdev->req_workqueue);
4083 err:
4084         ida_simple_remove(&hci_index_ida, hdev->id);
4085
4086         return error;
4087 }
4088 EXPORT_SYMBOL(hci_register_dev);
4089
4090 /* Unregister HCI device */
4091 void hci_unregister_dev(struct hci_dev *hdev)
4092 {
4093         int i, id;
4094
4095         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4096
4097         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4098
4099         id = hdev->id;
4100
4101         write_lock(&hci_dev_list_lock);
4102         list_del(&hdev->list);
4103         write_unlock(&hci_dev_list_lock);
4104
4105         hci_dev_do_close(hdev);
4106
4107         for (i = 0; i < NUM_REASSEMBLY; i++)
4108                 kfree_skb(hdev->reassembly[i]);
4109
4110         cancel_work_sync(&hdev->power_on);
4111
4112         if (!test_bit(HCI_INIT, &hdev->flags) &&
4113             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4114             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4115                 hci_dev_lock(hdev);
4116                 mgmt_index_removed(hdev);
4117                 hci_dev_unlock(hdev);
4118         }
4119
4120         /* mgmt_index_removed should take care of emptying the
4121          * pending list */
4122         BUG_ON(!list_empty(&hdev->mgmt_pending));
4123
4124         hci_notify(hdev, HCI_DEV_UNREG);
4125
4126         if (hdev->rfkill) {
4127                 rfkill_unregister(hdev->rfkill);
4128                 rfkill_destroy(hdev->rfkill);
4129         }
4130
4131         if (hdev->tfm_aes)
4132                 crypto_free_blkcipher(hdev->tfm_aes);
4133
4134         device_del(&hdev->dev);
4135
4136         debugfs_remove_recursive(hdev->debugfs);
4137
4138         destroy_workqueue(hdev->workqueue);
4139         destroy_workqueue(hdev->req_workqueue);
4140
4141         hci_dev_lock(hdev);
4142         hci_bdaddr_list_clear(&hdev->blacklist);
4143         hci_bdaddr_list_clear(&hdev->whitelist);
4144         hci_uuids_clear(hdev);
4145         hci_link_keys_clear(hdev);
4146         hci_smp_ltks_clear(hdev);
4147         hci_smp_irks_clear(hdev);
4148         hci_remote_oob_data_clear(hdev);
4149         hci_bdaddr_list_clear(&hdev->le_white_list);
4150         hci_conn_params_clear_all(hdev);
4151         hci_dev_unlock(hdev);
4152
4153         hci_dev_put(hdev);
4154
4155         ida_simple_remove(&hci_index_ida, id);
4156 }
4157 EXPORT_SYMBOL(hci_unregister_dev);
4158
4159 /* Suspend HCI device */
4160 int hci_suspend_dev(struct hci_dev *hdev)
4161 {
4162         hci_notify(hdev, HCI_DEV_SUSPEND);
4163         return 0;
4164 }
4165 EXPORT_SYMBOL(hci_suspend_dev);
4166
4167 /* Resume HCI device */
4168 int hci_resume_dev(struct hci_dev *hdev)
4169 {
4170         hci_notify(hdev, HCI_DEV_RESUME);
4171         return 0;
4172 }
4173 EXPORT_SYMBOL(hci_resume_dev);
4174
4175 /* Receive frame from HCI drivers */
4176 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4177 {
4178         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4179                       && !test_bit(HCI_INIT, &hdev->flags))) {
4180                 kfree_skb(skb);
4181                 return -ENXIO;
4182         }
4183
4184         /* Incoming skb */
4185         bt_cb(skb)->incoming = 1;
4186
4187         /* Time stamp */
4188         __net_timestamp(skb);
4189
4190         skb_queue_tail(&hdev->rx_q, skb);
4191         queue_work(hdev->workqueue, &hdev->rx_work);
4192
4193         return 0;
4194 }
4195 EXPORT_SYMBOL(hci_recv_frame);
4196
4197 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4198                           int count, __u8 index)
4199 {
4200         int len = 0;
4201         int hlen = 0;
4202         int remain = count;
4203         struct sk_buff *skb;
4204         struct bt_skb_cb *scb;
4205
4206         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4207             index >= NUM_REASSEMBLY)
4208                 return -EILSEQ;
4209
4210         skb = hdev->reassembly[index];
4211
4212         if (!skb) {
4213                 switch (type) {
4214                 case HCI_ACLDATA_PKT:
4215                         len = HCI_MAX_FRAME_SIZE;
4216                         hlen = HCI_ACL_HDR_SIZE;
4217                         break;
4218                 case HCI_EVENT_PKT:
4219                         len = HCI_MAX_EVENT_SIZE;
4220                         hlen = HCI_EVENT_HDR_SIZE;
4221                         break;
4222                 case HCI_SCODATA_PKT:
4223                         len = HCI_MAX_SCO_SIZE;
4224                         hlen = HCI_SCO_HDR_SIZE;
4225                         break;
4226                 }
4227
4228                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4229                 if (!skb)
4230                         return -ENOMEM;
4231
4232                 scb = (void *) skb->cb;
4233                 scb->expect = hlen;
4234                 scb->pkt_type = type;
4235
4236                 hdev->reassembly[index] = skb;
4237         }
4238
4239         while (count) {
4240                 scb = (void *) skb->cb;
4241                 len = min_t(uint, scb->expect, count);
4242
4243                 memcpy(skb_put(skb, len), data, len);
4244
4245                 count -= len;
4246                 data += len;
4247                 scb->expect -= len;
4248                 remain = count;
4249
4250                 switch (type) {
4251                 case HCI_EVENT_PKT:
4252                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4253                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4254                                 scb->expect = h->plen;
4255
4256                                 if (skb_tailroom(skb) < scb->expect) {
4257                                         kfree_skb(skb);
4258                                         hdev->reassembly[index] = NULL;
4259                                         return -ENOMEM;
4260                                 }
4261                         }
4262                         break;
4263
4264                 case HCI_ACLDATA_PKT:
4265                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4266                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4267                                 scb->expect = __le16_to_cpu(h->dlen);
4268
4269                                 if (skb_tailroom(skb) < scb->expect) {
4270                                         kfree_skb(skb);
4271                                         hdev->reassembly[index] = NULL;
4272                                         return -ENOMEM;
4273                                 }
4274                         }
4275                         break;
4276
4277                 case HCI_SCODATA_PKT:
4278                         if (skb->len == HCI_SCO_HDR_SIZE) {
4279                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4280                                 scb->expect = h->dlen;
4281
4282                                 if (skb_tailroom(skb) < scb->expect) {
4283                                         kfree_skb(skb);
4284                                         hdev->reassembly[index] = NULL;
4285                                         return -ENOMEM;
4286                                 }
4287                         }
4288                         break;
4289                 }
4290
4291                 if (scb->expect == 0) {
4292                         /* Complete frame */
4293
4294                         bt_cb(skb)->pkt_type = type;
4295                         hci_recv_frame(hdev, skb);
4296
4297                         hdev->reassembly[index] = NULL;
4298                         return remain;
4299                 }
4300         }
4301
4302         return remain;
4303 }
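
/* The reassembler works in two phases per packet: scb->expect first
 * covers only the header; once the header is complete it is parsed to
 * learn the payload length and scb->expect is raised accordingly. A
 * positive return value is the number of input bytes that were left
 * unconsumed because they belong to the next packet.
 */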
4304
4305 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4306 {
4307         int rem = 0;
4308
4309         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4310                 return -EILSEQ;
4311
4312         while (count) {
4313                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4314                 if (rem < 0)
4315                         return rem;
4316
4317                 data += (count - rem);
4318                 count = rem;
4319         }
4320
4321         return rem;
4322 }
4323 EXPORT_SYMBOL(hci_recv_fragment);
4324
4325 #define STREAM_REASSEMBLY 0
4326
4327 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4328 {
4329         int type;
4330         int rem = 0;
4331
4332         while (count) {
4333                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4334
4335                 if (!skb) {
4336                         struct { char type; } *pkt;
4337
4338                         /* Start of the frame */
4339                         pkt = data;
4340                         type = pkt->type;
4341
4342                         data++;
4343                         count--;
4344                 } else
4345                         type = bt_cb(skb)->pkt_type;
4346
4347                 rem = hci_reassembly(hdev, type, data, count,
4348                                      STREAM_REASSEMBLY);
4349                 if (rem < 0)
4350                         return rem;
4351
4352                 data += (count - rem);
4353                 count = rem;
4354         }
4355
4356         return rem;
4357 }
4358 EXPORT_SYMBOL(hci_recv_stream_fragment);
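
/* Illustrative driver usage (hci_uart_rx() is a hypothetical name, not
 * defined anywhere): an H4-style UART driver can feed raw bytes
 * straight into the stream reassembler, which strips the leading
 * packet-type byte and delivers complete frames via hci_recv_frame():
 *
 *	static void hci_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		int err = hci_recv_stream_fragment(hdev, buf, len);
 *
 *		if (err < 0)
 *			BT_ERR("Frame reassembly failed (%d)", err);
 *	}
 */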
4359
4360 /* ---- Interface to upper protocols ---- */
4361
4362 int hci_register_cb(struct hci_cb *cb)
4363 {
4364         BT_DBG("%p name %s", cb, cb->name);
4365
4366         write_lock(&hci_cb_list_lock);
4367         list_add(&cb->list, &hci_cb_list);
4368         write_unlock(&hci_cb_list_lock);
4369
4370         return 0;
4371 }
4372 EXPORT_SYMBOL(hci_register_cb);
4373
4374 int hci_unregister_cb(struct hci_cb *cb)
4375 {
4376         BT_DBG("%p name %s", cb, cb->name);
4377
4378         write_lock(&hci_cb_list_lock);
4379         list_del(&cb->list);
4380         write_unlock(&hci_cb_list_lock);
4381
4382         return 0;
4383 }
4384 EXPORT_SYMBOL(hci_unregister_cb);
4385
4386 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4387 {
4388         int err;
4389
4390         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4391
4392         /* Time stamp */
4393         __net_timestamp(skb);
4394
4395         /* Send copy to monitor */
4396         hci_send_to_monitor(hdev, skb);
4397
4398         if (atomic_read(&hdev->promisc)) {
4399                 /* Send copy to the sockets */
4400                 hci_send_to_sock(hdev, skb);
4401         }
4402
4403         /* Get rid of skb owner, prior to sending to the driver. */
4404         skb_orphan(skb);
4405
4406         err = hdev->send(hdev, skb);
4407         if (err < 0) {
4408                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4409                 kfree_skb(skb);
4410         }
4411 }
4412
4413 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4414 {
4415         skb_queue_head_init(&req->cmd_q);
4416         req->hdev = hdev;
4417         req->err = 0;
4418 }
4419
4420 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4421 {
4422         struct hci_dev *hdev = req->hdev;
4423         struct sk_buff *skb;
4424         unsigned long flags;
4425
4426         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4427
4428         /* If an error occurred during request building, remove all HCI
4429          * commands queued on the HCI request queue.
4430          */
4431         if (req->err) {
4432                 skb_queue_purge(&req->cmd_q);
4433                 return req->err;
4434         }
4435
4436         /* Do not allow empty requests */
4437         if (skb_queue_empty(&req->cmd_q))
4438                 return -ENODATA;
4439
4440         skb = skb_peek_tail(&req->cmd_q);
4441         bt_cb(skb)->req.complete = complete;
4442
4443         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4444         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4445         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4446
4447         queue_work(hdev->workqueue, &hdev->cmd_work);
4448
4449         return 0;
4450 }
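
/* Illustrative request usage (sketch only; my_complete is a
 * hypothetical callback): queue any number of commands and run them as
 * a single unit. Only the last command in the chain carries the
 * completion callback, so it fires once after the whole request has
 * been processed:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */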
4451
4452 bool hci_req_pending(struct hci_dev *hdev)
4453 {
4454         return (hdev->req_status == HCI_REQ_PEND);
4455 }
4456
4457 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4458                                        u32 plen, const void *param)
4459 {
4460         int len = HCI_COMMAND_HDR_SIZE + plen;
4461         struct hci_command_hdr *hdr;
4462         struct sk_buff *skb;
4463
4464         skb = bt_skb_alloc(len, GFP_ATOMIC);
4465         if (!skb)
4466                 return NULL;
4467
4468         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4469         hdr->opcode = cpu_to_le16(opcode);
4470         hdr->plen   = plen;
4471
4472         if (plen)
4473                 memcpy(skb_put(skb, plen), param, plen);
4474
4475         BT_DBG("skb len %d", skb->len);
4476
4477         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4478
4479         return skb;
4480 }
4481
4482 /* Send HCI command */
4483 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4484                  const void *param)
4485 {
4486         struct sk_buff *skb;
4487
4488         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4489
4490         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4491         if (!skb) {
4492                 BT_ERR("%s no memory for command", hdev->name);
4493                 return -ENOMEM;
4494         }
4495
4496         /* Stand-alone HCI commands must be flagged as
4497          * single-command requests.
4498          */
4499         bt_cb(skb)->req.start = true;
4500
4501         skb_queue_tail(&hdev->cmd_q, skb);
4502         queue_work(hdev->workqueue, &hdev->cmd_work);
4503
4504         return 0;
4505 }
4506
4507 /* Queue a command to an asynchronous HCI request */
4508 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4509                     const void *param, u8 event)
4510 {
4511         struct hci_dev *hdev = req->hdev;
4512         struct sk_buff *skb;
4513
4514         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4515
4516         /* If an error occurred during request building, there is no point in
4517          * queueing the HCI command. We can simply return.
4518          */
4519         if (req->err)
4520                 return;
4521
4522         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4523         if (!skb) {
4524                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4525                        hdev->name, opcode);
4526                 req->err = -ENOMEM;
4527                 return;
4528         }
4529
4530         if (skb_queue_empty(&req->cmd_q))
4531                 bt_cb(skb)->req.start = true;
4532
4533         bt_cb(skb)->req.event = event;
4534
4535         skb_queue_tail(&req->cmd_q, skb);
4536 }
4537
4538 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4539                  const void *param)
4540 {
4541         hci_req_add_ev(req, opcode, plen, param, 0);
4542 }
4543
4544 /* Get data from the previously sent command */
4545 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4546 {
4547         struct hci_command_hdr *hdr;
4548
4549         if (!hdev->sent_cmd)
4550                 return NULL;
4551
4552         hdr = (void *) hdev->sent_cmd->data;
4553
4554         if (hdr->opcode != cpu_to_le16(opcode))
4555                 return NULL;
4556
4557         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4558
4559         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4560 }
4561
4562 /* Send ACL data */
4563 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4564 {
4565         struct hci_acl_hdr *hdr;
4566         int len = skb->len;
4567
4568         skb_push(skb, HCI_ACL_HDR_SIZE);
4569         skb_reset_transport_header(skb);
4570         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4571         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4572         hdr->dlen   = cpu_to_le16(len);
4573 }
4574
4575 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4576                           struct sk_buff *skb, __u16 flags)
4577 {
4578         struct hci_conn *conn = chan->conn;
4579         struct hci_dev *hdev = conn->hdev;
4580         struct sk_buff *list;
4581
4582         skb->len = skb_headlen(skb);
4583         skb->data_len = 0;
4584
4585         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4586
4587         switch (hdev->dev_type) {
4588         case HCI_BREDR:
4589                 hci_add_acl_hdr(skb, conn->handle, flags);
4590                 break;
4591         case HCI_AMP:
4592                 hci_add_acl_hdr(skb, chan->handle, flags);
4593                 break;
4594         default:
4595                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4596                 return;
4597         }
4598
4599         list = skb_shinfo(skb)->frag_list;
4600         if (!list) {
4601                 /* Non-fragmented */
4602                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4603
4604                 skb_queue_tail(queue, skb);
4605         } else {
4606                 /* Fragmented */
4607                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4608
4609                 skb_shinfo(skb)->frag_list = NULL;
4610
4611                 /* Queue all fragments atomically */
4612                 spin_lock(&queue->lock);
4613
4614                 __skb_queue_tail(queue, skb);
4615
4616                 flags &= ~ACL_START;
4617                 flags |= ACL_CONT;
4618                 do {
4619                         skb = list; list = list->next;
4620
4621                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4622                         hci_add_acl_hdr(skb, conn->handle, flags);
4623
4624                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4625
4626                         __skb_queue_tail(queue, skb);
4627                 } while (list);
4628
4629                 spin_unlock(&queue->lock);
4630         }
4631 }
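
/* Only the first fragment keeps the caller's ACL_START flags; every
 * skb on the frag_list is re-tagged ACL_CONT so that the controller
 * can reassemble the L2CAP PDU. The whole chain is queued under the
 * queue lock so that fragments of different PDUs cannot interleave.
 */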
4632
4633 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4634 {
4635         struct hci_dev *hdev = chan->conn->hdev;
4636
4637         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4638
4639         hci_queue_acl(chan, &chan->data_q, skb, flags);
4640
4641         queue_work(hdev->workqueue, &hdev->tx_work);
4642 }
4643
4644 /* Send SCO data */
4645 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4646 {
4647         struct hci_dev *hdev = conn->hdev;
4648         struct hci_sco_hdr hdr;
4649
4650         BT_DBG("%s len %d", hdev->name, skb->len);
4651
4652         hdr.handle = cpu_to_le16(conn->handle);
4653         hdr.dlen   = skb->len;
4654
4655         skb_push(skb, HCI_SCO_HDR_SIZE);
4656         skb_reset_transport_header(skb);
4657         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4658
4659         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4660
4661         skb_queue_tail(&conn->data_q, skb);
4662         queue_work(hdev->workqueue, &hdev->tx_work);
4663 }
4664
4665 /* ---- HCI TX task (outgoing data) ---- */
4666
4667 /* HCI Connection scheduler */
4668 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4669                                      int *quote)
4670 {
4671         struct hci_conn_hash *h = &hdev->conn_hash;
4672         struct hci_conn *conn = NULL, *c;
4673         unsigned int num = 0, min = ~0;
4674
4675         /* We don't have to lock the device here. Connections are always
4676          * added and removed with the TX task disabled. */
4677
4678         rcu_read_lock();
4679
4680         list_for_each_entry_rcu(c, &h->list, list) {
4681                 if (c->type != type || skb_queue_empty(&c->data_q))
4682                         continue;
4683
4684                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4685                         continue;
4686
4687                 num++;
4688
4689                 if (c->sent < min) {
4690                         min  = c->sent;
4691                         conn = c;
4692                 }
4693
4694                 if (hci_conn_num(hdev, type) == num)
4695                         break;
4696         }
4697
4698         rcu_read_unlock();
4699
4700         if (conn) {
4701                 int cnt, q;
4702
4703                 switch (conn->type) {
4704                 case ACL_LINK:
4705                         cnt = hdev->acl_cnt;
4706                         break;
4707                 case SCO_LINK:
4708                 case ESCO_LINK:
4709                         cnt = hdev->sco_cnt;
4710                         break;
4711                 case LE_LINK:
4712                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4713                         break;
4714                 default:
4715                         cnt = 0;
4716                         BT_ERR("Unknown link type");
4717                 }
4718
4719                 q = cnt / num;
4720                 *quote = q ? q : 1;
4721         } else
4722                 *quote = 0;
4723
4724         BT_DBG("conn %p quote %d", conn, *quote);
4725         return conn;
4726 }
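
/* Worked example: with hdev->acl_cnt == 8 free ACL credits and three
 * ACL connections holding queued data, the connection with the fewest
 * packets in flight wins and is granted quote = 8 / 3 = 2 packets. A
 * quote of at least 1 is always given, so a single remaining credit
 * still makes progress.
 */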
4727
4728 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4729 {
4730         struct hci_conn_hash *h = &hdev->conn_hash;
4731         struct hci_conn *c;
4732
4733         BT_ERR("%s link tx timeout", hdev->name);
4734
4735         rcu_read_lock();
4736
4737         /* Kill stalled connections */
4738         list_for_each_entry_rcu(c, &h->list, list) {
4739                 if (c->type == type && c->sent) {
4740                         BT_ERR("%s killing stalled connection %pMR",
4741                                hdev->name, &c->dst);
4742                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4743                 }
4744         }
4745
4746         rcu_read_unlock();
4747 }
4748
4749 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4750                                       int *quote)
4751 {
4752         struct hci_conn_hash *h = &hdev->conn_hash;
4753         struct hci_chan *chan = NULL;
4754         unsigned int num = 0, min = ~0, cur_prio = 0;
4755         struct hci_conn *conn;
4756         int cnt, q, conn_num = 0;
4757
4758         BT_DBG("%s", hdev->name);
4759
4760         rcu_read_lock();
4761
4762         list_for_each_entry_rcu(conn, &h->list, list) {
4763                 struct hci_chan *tmp;
4764
4765                 if (conn->type != type)
4766                         continue;
4767
4768                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4769                         continue;
4770
4771                 conn_num++;
4772
4773                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4774                         struct sk_buff *skb;
4775
4776                         if (skb_queue_empty(&tmp->data_q))
4777                                 continue;
4778
4779                         skb = skb_peek(&tmp->data_q);
4780                         if (skb->priority < cur_prio)
4781                                 continue;
4782
4783                         if (skb->priority > cur_prio) {
4784                                 num = 0;
4785                                 min = ~0;
4786                                 cur_prio = skb->priority;
4787                         }
4788
4789                         num++;
4790
4791                         if (conn->sent < min) {
4792                                 min  = conn->sent;
4793                                 chan = tmp;
4794                         }
4795                 }
4796
4797                 if (hci_conn_num(hdev, type) == conn_num)
4798                         break;
4799         }
4800
4801         rcu_read_unlock();
4802
4803         if (!chan)
4804                 return NULL;
4805
4806         switch (chan->conn->type) {
4807         case ACL_LINK:
4808                 cnt = hdev->acl_cnt;
4809                 break;
4810         case AMP_LINK:
4811                 cnt = hdev->block_cnt;
4812                 break;
4813         case SCO_LINK:
4814         case ESCO_LINK:
4815                 cnt = hdev->sco_cnt;
4816                 break;
4817         case LE_LINK:
4818                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4819                 break;
4820         default:
4821                 cnt = 0;
4822                 BT_ERR("Unknown link type");
4823         }
4824
4825         q = cnt / num;
4826         *quote = q ? q : 1;
4827         BT_DBG("chan %p quote %d", chan, *quote);
4828         return chan;
4829 }
4830
4831 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4832 {
4833         struct hci_conn_hash *h = &hdev->conn_hash;
4834         struct hci_conn *conn;
4835         int num = 0;
4836
4837         BT_DBG("%s", hdev->name);
4838
4839         rcu_read_lock();
4840
4841         list_for_each_entry_rcu(conn, &h->list, list) {
4842                 struct hci_chan *chan;
4843
4844                 if (conn->type != type)
4845                         continue;
4846
4847                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4848                         continue;
4849
4850                 num++;
4851
4852                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4853                         struct sk_buff *skb;
4854
4855                         if (chan->sent) {
4856                                 chan->sent = 0;
4857                                 continue;
4858                         }
4859
4860                         if (skb_queue_empty(&chan->data_q))
4861                                 continue;
4862
4863                         skb = skb_peek(&chan->data_q);
4864                         if (skb->priority >= HCI_PRIO_MAX - 1)
4865                                 continue;
4866
4867                         skb->priority = HCI_PRIO_MAX - 1;
4868
4869                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4870                                skb->priority);
4871                 }
4872
4873                 if (hci_conn_num(hdev, type) == num)
4874                         break;
4875         }
4876
4877         rcu_read_unlock();
4879 }
4880
4881 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4882 {
4883         /* Calculate count of blocks used by this packet */
4884         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4885 }
4886
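     /* If no ACL credits have come back (cnt == 0) for longer than
      * HCI_ACL_TX_TIMEOUT, assume the link is stuck and let
      * hci_link_tx_to() tear down the affected ACL connections.
      */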
4887 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4888 {
4889         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4890                 /* ACL tx timeout must be longer than maximum
4891                  * link supervision timeout (40.9 seconds) */
4892                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4893                                        HCI_ACL_TX_TIMEOUT))
4894                         hci_link_tx_to(hdev, ACL_LINK);
4895         }
4896 }
4897
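     /* Packet-based ACL scheduler: while credits remain, drain the best
      * channel (as chosen by hci_chan_sent()) up to its quote, stopping
      * early if the head of its queue drops in priority.  Every frame
      * sent consumes one acl_cnt credit; if anything was sent, queue
      * priorities are recalculated afterwards.
      */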
4898 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4899 {
4900         unsigned int cnt = hdev->acl_cnt;
4901         struct hci_chan *chan;
4902         struct sk_buff *skb;
4903         int quote;
4904
4905         __check_timeout(hdev, cnt);
4906
4907         while (hdev->acl_cnt &&
4908                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4909                 u32 priority = (skb_peek(&chan->data_q))->priority;
4910                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4911                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4912                                skb->len, skb->priority);
4913
4914                         /* Stop if priority has changed */
4915                         if (skb->priority < priority)
4916                                 break;
4917
4918                         skb = skb_dequeue(&chan->data_q);
4919
4920                         hci_conn_enter_active_mode(chan->conn,
4921                                                    bt_cb(skb)->force_active);
4922
4923                         hci_send_frame(hdev, skb);
4924                         hdev->acl_last_tx = jiffies;
4925
4926                         hdev->acl_cnt--;
4927                         chan->sent++;
4928                         chan->conn->sent++;
4929                 }
4930         }
4931
4932         if (cnt != hdev->acl_cnt)
4933                 hci_prio_recalculate(hdev, ACL_LINK);
4934 }
4935
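     /* Block-based ACL scheduler (data block flow control): like the
      * packet-based variant, except that credits are counted in buffer
      * blocks, so each frame consumes __get_blocks() credits from both
      * block_cnt and the channel's quote.  On an AMP controller the
      * scheduled traffic is AMP_LINK rather than ACL_LINK.
      */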
4936 static void hci_sched_acl_blk(struct hci_dev *hdev)
4937 {
4938         unsigned int cnt = hdev->block_cnt;
4939         struct hci_chan *chan;
4940         struct sk_buff *skb;
4941         int quote;
4942         u8 type;
4943
4944         __check_timeout(hdev, cnt);
4945
4946         BT_DBG("%s", hdev->name);
4947
4948         if (hdev->dev_type == HCI_AMP)
4949                 type = AMP_LINK;
4950         else
4951                 type = ACL_LINK;
4952
4953         while (hdev->block_cnt > 0 &&
4954                (chan = hci_chan_sent(hdev, type, &quote))) {
4955                 u32 priority = (skb_peek(&chan->data_q))->priority;
4956                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4957                         int blocks;
4958
4959                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4960                                skb->len, skb->priority);
4961
4962                         /* Stop if priority has changed */
4963                         if (skb->priority < priority)
4964                                 break;
4965
4966                         skb = skb_dequeue(&chan->data_q);
4967
4968                         blocks = __get_blocks(hdev, skb);
4969                         if (blocks > hdev->block_cnt)
4970                                 return;
4971
4972                         hci_conn_enter_active_mode(chan->conn,
4973                                                    bt_cb(skb)->force_active);
4974
4975                         hci_send_frame(hdev, skb);
4976                         hdev->acl_last_tx = jiffies;
4977
4978                         hdev->block_cnt -= blocks;
4979                         quote -= blocks;
4980
4981                         chan->sent += blocks;
4982                         chan->conn->sent += blocks;
4983                 }
4984         }
4985
4986         if (cnt != hdev->block_cnt)
4987                 hci_prio_recalculate(hdev, type);
4988 }
4989
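     /* Dispatch to the ACL scheduler matching the controller's flow
      * control mode: packet-based or block-based.
      */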
4990 static void hci_sched_acl(struct hci_dev *hdev)
4991 {
4992         BT_DBG("%s", hdev->name);
4993
4994         /* No ACL links on a BR/EDR controller: nothing to schedule */
4995         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4996                 return;
4997
4998         /* No AMP links on an AMP controller: nothing to schedule */
4999         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5000                 return;
5001
5002         switch (hdev->flow_ctl_mode) {
5003         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5004                 hci_sched_acl_pkt(hdev);
5005                 break;
5006
5007         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5008                 hci_sched_acl_blk(hdev);
5009                 break;
5010         }
5011 }
5012
5013 /* Schedule SCO */
5014 static void hci_sched_sco(struct hci_dev *hdev)
5015 {
5016         struct hci_conn *conn;
5017         struct sk_buff *skb;
5018         int quote;
5019
5020         BT_DBG("%s", hdev->name);
5021
5022         if (!hci_conn_num(hdev, SCO_LINK))
5023                 return;
5024
5025         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5026                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5027                         BT_DBG("skb %p len %d", skb, skb->len);
5028                         hci_send_frame(hdev, skb);
5029
5030                         conn->sent++;
5031                         if (conn->sent == ~0)
5032                                 conn->sent = 0;
5033                 }
5034         }
5035 }
5036
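     /* Schedule eSCO: same as SCO scheduling, but for ESCO_LINK
      * connections.  Both link types share the sco_cnt credit pool.
      */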
5037 static void hci_sched_esco(struct hci_dev *hdev)
5038 {
5039         struct hci_conn *conn;
5040         struct sk_buff *skb;
5041         int quote;
5042
5043         BT_DBG("%s", hdev->name);
5044
5045         if (!hci_conn_num(hdev, ESCO_LINK))
5046                 return;
5047
5048         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5049                                                      &quote))) {
5050                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5051                         BT_DBG("skb %p len %d", skb, skb->len);
5052                         hci_send_frame(hdev, skb);
5053
5054                         conn->sent++;
5055                         if (conn->sent == ~0)
5056                                 conn->sent = 0;
5057                 }
5058         }
5059 }
5060
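     /* Schedule LE data.  Controllers without a separate LE buffer pool
      * (le_pkts == 0) borrow ACL credits, which is why the remaining
      * count is written back to either le_cnt or acl_cnt.
      */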
5061 static void hci_sched_le(struct hci_dev *hdev)
5062 {
5063         struct hci_chan *chan;
5064         struct sk_buff *skb;
5065         int quote, cnt, tmp;
5066
5067         BT_DBG("%s", hdev->name);
5068
5069         if (!hci_conn_num(hdev, LE_LINK))
5070                 return;
5071
5072         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5073                 /* LE tx timeout must be longer than maximum
5074                  * link supervision timeout (40.9 seconds) */
5075                 if (!hdev->le_cnt && hdev->le_pkts &&
5076                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5077                         hci_link_tx_to(hdev, LE_LINK);
5078         }
5079
5080         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5081         tmp = cnt;
5082         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5083                 u32 priority = (skb_peek(&chan->data_q))->priority;
5084                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5085                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5086                                skb->len, skb->priority);
5087
5088                         /* Stop if priority has changed */
5089                         if (skb->priority < priority)
5090                                 break;
5091
5092                         skb = skb_dequeue(&chan->data_q);
5093
5094                         hci_send_frame(hdev, skb);
5095                         hdev->le_last_tx = jiffies;
5096
5097                         cnt--;
5098                         chan->sent++;
5099                         chan->conn->sent++;
5100                 }
5101         }
5102
5103         if (hdev->le_pkts)
5104                 hdev->le_cnt = cnt;
5105         else
5106                 hdev->acl_cnt = cnt;
5107
5108         if (cnt != tmp)
5109                 hci_prio_recalculate(hdev, LE_LINK);
5110 }
5111
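     /* TX work: run the per-link-type schedulers (skipped while userspace
      * owns the device through the user channel) and then flush any raw
      * packets queued for direct transmission.
      */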
5112 static void hci_tx_work(struct work_struct *work)
5113 {
5114         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5115         struct sk_buff *skb;
5116
5117         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5118                hdev->sco_cnt, hdev->le_cnt);
5119
5120         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5121                 /* Run the schedulers and push queued frames to the driver */
5122                 hci_sched_acl(hdev);
5123                 hci_sched_sco(hdev);
5124                 hci_sched_esco(hdev);
5125                 hci_sched_le(hdev);
5126         }
5127
5128         /* Send next queued raw (unknown type) packet */
5129         while ((skb = skb_dequeue(&hdev->raw_q)))
5130                 hci_send_frame(hdev, skb);
5131 }
5132
5133 /* ----- HCI RX task (incoming data processing) ----- */
5134
5135 /* ACL data packet */
5136 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5137 {
5138         struct hci_acl_hdr *hdr = (void *) skb->data;
5139         struct hci_conn *conn;
5140         __u16 handle, flags;
5141
5142         skb_pull(skb, HCI_ACL_HDR_SIZE);
5143
5144         handle = __le16_to_cpu(hdr->handle);
5145         flags  = hci_flags(handle);
5146         handle = hci_handle(handle);
5147
5148         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5149                handle, flags);
5150
5151         hdev->stat.acl_rx++;
5152
5153         hci_dev_lock(hdev);
5154         conn = hci_conn_hash_lookup_handle(hdev, handle);
5155         hci_dev_unlock(hdev);
5156
5157         if (conn) {
5158                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5159
5160                 /* Send to upper protocol */
5161                 l2cap_recv_acldata(conn, skb, flags);
5162                 return;
5163         } else {
5164                 BT_ERR("%s ACL packet for unknown connection handle %d",
5165                        hdev->name, handle);
5166         }
5167
5168         kfree_skb(skb);
5169 }
5170
5171 /* SCO data packet */
5172 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5173 {
5174         struct hci_sco_hdr *hdr = (void *) skb->data;
5175         struct hci_conn *conn;
5176         __u16 handle;
5177
5178         skb_pull(skb, HCI_SCO_HDR_SIZE);
5179
5180         handle = __le16_to_cpu(hdr->handle);
5181
5182         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5183
5184         hdev->stat.sco_rx++;
5185
5186         hci_dev_lock(hdev);
5187         conn = hci_conn_hash_lookup_handle(hdev, handle);
5188         hci_dev_unlock(hdev);
5189
5190         if (conn) {
5191                 /* Send to upper protocol */
5192                 sco_recv_scodata(conn, skb);
5193                 return;
5194         } else {
5195                 BT_ERR("%s SCO packet for unknown connection handle %d",
5196                        hdev->name, handle);
5197         }
5198
5199         kfree_skb(skb);
5200 }
5201
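     /* The current request is complete once the command queue is empty or
      * the next queued command starts a new request (req.start is set).
      */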
5202 static bool hci_req_is_complete(struct hci_dev *hdev)
5203 {
5204         struct sk_buff *skb;
5205
5206         skb = skb_peek(&hdev->cmd_q);
5207         if (!skb)
5208                 return true;
5209
5210         return bt_cb(skb)->req.start;
5211 }
5212
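     /* Put a clone of the last sent command back at the head of the
      * command queue so it is sent again.  HCI_OP_RESET is never resent;
      * see the CSR quirk handling in hci_req_cmd_complete().
      */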
5213 static void hci_resend_last(struct hci_dev *hdev)
5214 {
5215         struct hci_command_hdr *sent;
5216         struct sk_buff *skb;
5217         u16 opcode;
5218
5219         if (!hdev->sent_cmd)
5220                 return;
5221
5222         sent = (void *) hdev->sent_cmd->data;
5223         opcode = __le16_to_cpu(sent->opcode);
5224         if (opcode == HCI_OP_RESET)
5225                 return;
5226
5227         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5228         if (!skb)
5229                 return;
5230
5231         skb_queue_head(&hdev->cmd_q, skb);
5232         queue_work(hdev->workqueue, &hdev->cmd_work);
5233 }
5234
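     /* Handle completion of the command with the given opcode: decide
      * whether the request it belongs to has finished and, if so, run the
      * request's complete callback exactly once and drop any remaining
      * queued commands of that request.
      */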
5235 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5236 {
5237         hci_req_complete_t req_complete = NULL;
5238         struct sk_buff *skb;
5239         unsigned long flags;
5240
5241         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5242
5243         /* If the completed command doesn't match the last one that was
5244          * sent, we need to handle it specially.
5245          */
5246         if (!hci_sent_cmd_data(hdev, opcode)) {
5247                 /* Some CSR-based controllers generate a spontaneous
5248                  * reset complete event during init and any pending
5249                  * command will never be completed. In such a case we
5250                  * need to resend whatever was the last sent
5251                  * command.
5252                  */
5253                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5254                         hci_resend_last(hdev);
5255
5256                 return;
5257         }
5258
5259         /* If the command succeeded and there are still more commands in
5260          * this request, the request is not yet complete.
5261          */
5262         if (!status && !hci_req_is_complete(hdev))
5263                 return;
5264
5265         /* If this was the last command in a request, the complete
5266          * callback would be found in hdev->sent_cmd instead of the
5267          * command queue (hdev->cmd_q).
5268          */
5269         if (hdev->sent_cmd) {
5270                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5271
5272                 if (req_complete) {
5273                         /* We must set the complete callback to NULL to
5274                          * avoid calling the callback more than once if
5275                          * this function gets called again.
5276                          */
5277                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5278
5279                         goto call_complete;
5280                 }
5281         }
5282
5283         /* Remove all pending commands belonging to this request */
5284         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5285         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5286                 if (bt_cb(skb)->req.start) {
5287                         __skb_queue_head(&hdev->cmd_q, skb);
5288                         break;
5289                 }
5290
5291                 req_complete = bt_cb(skb)->req.complete;
5292                 kfree_skb(skb);
5293         }
5294         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5295
5296 call_complete:
5297         if (req_complete)
5298                 req_complete(hdev, status);
5299 }
5300
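     /* RX work: drain hdev->rx_q.  Each frame is first mirrored to the
      * monitor socket (and to raw sockets in promiscuous mode).  In user
      * channel mode nothing is processed further; during HCI_INIT, ACL
      * and SCO data is discarded.  Everything else is dispatched by
      * packet type to the event, ACL or SCO handler.
      */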
5301 static void hci_rx_work(struct work_struct *work)
5302 {
5303         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5304         struct sk_buff *skb;
5305
5306         BT_DBG("%s", hdev->name);
5307
5308         while ((skb = skb_dequeue(&hdev->rx_q))) {
5309                 /* Send copy to monitor */
5310                 hci_send_to_monitor(hdev, skb);
5311
5312                 if (atomic_read(&hdev->promisc)) {
5313                         /* Send copy to the sockets */
5314                         hci_send_to_sock(hdev, skb);
5315                 }
5316
5317                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5318                         kfree_skb(skb);
5319                         continue;
5320                 }
5321
5322                 if (test_bit(HCI_INIT, &hdev->flags)) {
5323                         /* Don't process data packets in this state. */
5324                         switch (bt_cb(skb)->pkt_type) {
5325                         case HCI_ACLDATA_PKT:
5326                         case HCI_SCODATA_PKT:
5327                                 kfree_skb(skb);
5328                                 continue;
5329                         }
5330                 }
5331
5332                 /* Process frame */
5333                 switch (bt_cb(skb)->pkt_type) {
5334                 case HCI_EVENT_PKT:
5335                         BT_DBG("%s Event packet", hdev->name);
5336                         hci_event_packet(hdev, skb);
5337                         break;
5338
5339                 case HCI_ACLDATA_PKT:
5340                         BT_DBG("%s ACL data packet", hdev->name);
5341                         hci_acldata_packet(hdev, skb);
5342                         break;
5343
5344                 case HCI_SCODATA_PKT:
5345                         BT_DBG("%s SCO data packet", hdev->name);
5346                         hci_scodata_packet(hdev, skb);
5347                         break;
5348
5349                 default:
5350                         kfree_skb(skb);
5351                         break;
5352                 }
5353         }
5354 }
5355
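     /* CMD work: send the next queued command once the controller has a
      * free command slot (cmd_cnt).  A clone is kept in hdev->sent_cmd to
      * match the eventual completion, and the command timer is armed
      * unless a reset is in progress.
      */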
5356 static void hci_cmd_work(struct work_struct *work)
5357 {
5358         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5359         struct sk_buff *skb;
5360
5361         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5362                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5363
5364         /* Send queued commands */
5365         if (atomic_read(&hdev->cmd_cnt)) {
5366                 skb = skb_dequeue(&hdev->cmd_q);
5367                 if (!skb)
5368                         return;
5369
5370                 kfree_skb(hdev->sent_cmd);
5371
5372                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5373                 if (hdev->sent_cmd) {
5374                         atomic_dec(&hdev->cmd_cnt);
5375                         hci_send_frame(hdev, skb);
5376                         if (test_bit(HCI_RESET, &hdev->flags))
5377                                 cancel_delayed_work(&hdev->cmd_timer);
5378                         else
5379                                 schedule_delayed_work(&hdev->cmd_timer,
5380                                                       HCI_CMD_TIMEOUT);
5381                 } else {
5382                         skb_queue_head(&hdev->cmd_q, skb);
5383                         queue_work(hdev->workqueue, &hdev->cmd_work);
5384                 }
5385         }
5386 }
5387
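     /* Append an LE Set Scan Enable command that disables scanning. */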
5388 void hci_req_add_le_scan_disable(struct hci_request *req)
5389 {
5390         struct hci_cp_le_set_scan_enable cp;
5391
5392         memset(&cp, 0, sizeof(cp));
5393         cp.enable = LE_SCAN_DISABLE;
5394         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5395 }
5396
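     /* Append the command sequence that starts LE passive scanning: pick
      * a suitable own address, program the scan parameters from the
      * per-device interval and window, then enable scanning with
      * duplicate filtering.
      */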
5397 void hci_req_add_le_passive_scan(struct hci_request *req)
5398 {
5399         struct hci_cp_le_set_scan_param param_cp;
5400         struct hci_cp_le_set_scan_enable enable_cp;
5401         struct hci_dev *hdev = req->hdev;
5402         u8 own_addr_type;
5403
5404         /* Set require_privacy to false since no SCAN_REQ are sent
5405          * during passive scanning. Not using an unresolvable address
5406          * here is important so that peer devices using direct
5407          * advertising with our address will be correctly reported
5408          * by the controller.
5409          */
5410         if (hci_update_random_address(req, false, &own_addr_type))
5411                 return;
5412
5413         memset(&param_cp, 0, sizeof(param_cp));
5414         param_cp.type = LE_SCAN_PASSIVE;
5415         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5416         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5417         param_cp.own_address_type = own_addr_type;
5418         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5419                     &param_cp);
5420
5421         memset(&enable_cp, 0, sizeof(enable_cp));
5422         enable_cp.enable = LE_SCAN_ENABLE;
5423         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5424         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5425                     &enable_cp);
5426 }
5427
5428 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5429 {
5430         if (status)
5431                 BT_DBG("HCI request failed to update background scanning: "
5432                        "status 0x%2.2x", status);
5433 }
5434
5435 /* This function controls the background scanning based on hdev->pend_le_conns
5436  * list. If there are pending LE connections, we start background scanning;
5437  * otherwise we stop it.
5438  *
5439  * This function requires that the caller holds hdev->lock.
5440  */
5441 void hci_update_background_scan(struct hci_dev *hdev)
5442 {
5443         struct hci_request req;
5444         struct hci_conn *conn;
5445         int err;
5446
5447         if (!test_bit(HCI_UP, &hdev->flags) ||
5448             test_bit(HCI_INIT, &hdev->flags) ||
5449             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5450             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5451             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5452             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5453                 return;
5454
5455         /* No point in doing scanning if LE support hasn't been enabled */
5456         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5457                 return;
5458
5459         /* If discovery is active don't interfere with it */
5460         if (hdev->discovery.state != DISCOVERY_STOPPED)
5461                 return;
5462
5463         hci_req_init(&req, hdev);
5464
5465         if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5466             list_empty(&hdev->pend_le_conns) &&
5467             list_empty(&hdev->pend_le_reports)) {
5468                 /* If there are no pending LE connections or devices
5469                  * to be scanned for, we should stop the background
5470                  * scanning.
5471                  */
5472
5473                 /* If the controller is not scanning, we are done. */
5474                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5475                         return;
5476
5477                 hci_req_add_le_scan_disable(&req);
5478
5479                 BT_DBG("%s stopping background scanning", hdev->name);
5480         } else {
5481                 /* If there is at least one pending LE connection, we should
5482                  * keep the background scan running.
5483                  */
5484
5485                 /* If controller is connecting, we should not start scanning
5486                  * since some controllers are not able to scan and connect at
5487                  * the same time.
5488                  */
5489                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5490                 if (conn)
5491                         return;
5492
5493                 /* If controller is currently scanning, we stop it to ensure we
5494                  * don't miss any advertising (due to duplicates filter).
5495                  */
5496                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5497                         hci_req_add_le_scan_disable(&req);
5498
5499                 hci_req_add_le_passive_scan(&req);
5500
5501                 BT_DBG("%s starting background scanning", hdev->name);
5502         }
5503
5504         err = hci_req_run(&req, update_background_scan_complete);
5505         if (err)
5506                 BT_ERR("Failed to run HCI request: err %d", err);
5507 }