net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
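
/* Writing to the dut_mode entry toggles Device Under Test mode on a
 * running controller: "Y" sends HCI_OP_ENABLE_DUT_MODE, "N" resets the
 * controller to leave DUT mode. With debugfs mounted in the usual
 * place, the entry for the first controller would typically appear as
 * /sys/kernel/debug/bluetooth/hci0/dut_mode (the exact path depends on
 * where debugfs is mounted).
 */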

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open           = whitelist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
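
/* The simple numeric attributes below all follow one pattern: a getter
 * (and optionally a setter with range validation) that takes the
 * device lock, wrapped by DEFINE_SIMPLE_ATTRIBUTE(), which generates
 * the file_operations and formats the u64 value with the given
 * printf-style string. A read-only attribute, like voice_setting
 * above, simply passes NULL for the setter.
 */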

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
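
/* Take ownership of the last received event (hdev->recv_evt) for the
 * synchronous command path. If @event is non-zero, the skb is returned
 * only when its event code matches; otherwise it must be a Command
 * Complete event whose opcode matches @opcode. On any mismatch the skb
 * is freed and -ENODATA is returned.
 */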
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
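
/* A minimal usage sketch (not from this file): a caller holding the
 * request lock can issue one command and wait for its Command Complete
 * event, e.g. to read the controller's version information:
 *
 *      struct sk_buff *skb;
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *
 *      if (!IS_ERR(skb)) {
 *              ... parse skb->data as struct hci_rp_read_local_version ...
 *              kfree_skb(skb);
 *      }
 */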

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
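
/* The request functions passed to hci_req_sync() only queue commands;
 * hci_req_run() then sends them and the completion callback wakes the
 * waiter. A minimal sketch of such a builder (hypothetical, for
 * illustration only):
 *
 *      static void scan_enable_req(struct hci_request *req,
 *                                  unsigned long opt)
 *      {
 *              __u8 scan = opt;
 *
 *              hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      }
 *
 * which would then be driven by something like
 * hci_req_sync(hdev, scan_enable_req, SCAN_PAGE, HCI_INIT_TIMEOUT).
 */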

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
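
/* Pick the inquiry result format the controller should use: 0x02 for
 * inquiry result with extended data (EIR), 0x01 for inquiry result
 * with RSSI, 0x00 for the standard format. Besides the LMP feature
 * bits, a few known controllers (matched below by manufacturer,
 * hci_rev and lmp_subver) support RSSI results without advertising it.
 */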
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1419 {
1420         if (lmp_ext_inq_capable(hdev))
1421                 return 0x02;
1422
1423         if (lmp_inq_rssi_capable(hdev))
1424                 return 0x01;
1425
1426         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427             hdev->lmp_subver == 0x0757)
1428                 return 0x01;
1429
1430         if (hdev->manufacturer == 15) {
1431                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432                         return 0x01;
1433                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434                         return 0x01;
1435                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436                         return 0x01;
1437         }
1438
1439         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440             hdev->lmp_subver == 0x1805)
1441                 return 0x01;
1442
1443         return 0x00;
1444 }
1445
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1447 {
1448         u8 mode;
1449
1450         mode = hci_get_inquiry_mode(req->hdev);
1451
1452         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1453 }
1454
1455 static void hci_setup_event_mask(struct hci_request *req)
1456 {
1457         struct hci_dev *hdev = req->hdev;
1458
1459         /* The second byte is 0xff instead of 0x9f (two reserved bits
1460          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461          * command otherwise.
1462          */
1463         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1464
1465         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1466          * any event mask for pre 1.2 devices.
1467          */
1468         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469                 return;
1470
1471         if (lmp_bredr_capable(hdev)) {
1472                 events[4] |= 0x01; /* Flow Specification Complete */
1473                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475                 events[5] |= 0x08; /* Synchronous Connection Complete */
1476                 events[5] |= 0x10; /* Synchronous Connection Changed */
1477         } else {
1478                 /* Use a different default for LE-only devices */
1479                 memset(events, 0, sizeof(events));
1480                 events[0] |= 0x10; /* Disconnection Complete */
1481                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482                 events[1] |= 0x20; /* Command Complete */
1483                 events[1] |= 0x40; /* Command Status */
1484                 events[1] |= 0x80; /* Hardware Error */
1485                 events[2] |= 0x04; /* Number of Completed Packets */
1486                 events[3] |= 0x02; /* Data Buffer Overflow */
1487
1488                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489                         events[0] |= 0x80; /* Encryption Change */
1490                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491                 }
1492         }
1493
1494         if (lmp_inq_rssi_capable(hdev))
1495                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496
1497         if (lmp_sniffsubr_capable(hdev))
1498                 events[5] |= 0x20; /* Sniff Subrating */
1499
1500         if (lmp_pause_enc_capable(hdev))
1501                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1502
1503         if (lmp_ext_inq_capable(hdev))
1504                 events[5] |= 0x40; /* Extended Inquiry Result */
1505
1506         if (lmp_no_flush_capable(hdev))
1507                 events[7] |= 0x01; /* Enhanced Flush Complete */
1508
1509         if (lmp_lsto_capable(hdev))
1510                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1511
1512         if (lmp_ssp_capable(hdev)) {
1513                 events[6] |= 0x01;      /* IO Capability Request */
1514                 events[6] |= 0x02;      /* IO Capability Response */
1515                 events[6] |= 0x04;      /* User Confirmation Request */
1516                 events[6] |= 0x08;      /* User Passkey Request */
1517                 events[6] |= 0x10;      /* Remote OOB Data Request */
1518                 events[6] |= 0x20;      /* Simple Pairing Complete */
1519                 events[7] |= 0x04;      /* User Passkey Notification */
1520                 events[7] |= 0x08;      /* Keypress Notification */
1521                 events[7] |= 0x10;      /* Remote Host Supported
1522                                          * Features Notification
1523                                          */
1524         }
1525
1526         if (lmp_le_capable(hdev))
1527                 events[7] |= 0x20;      /* LE Meta-Event */
1528
1529         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1530 }
1531
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1533 {
1534         struct hci_dev *hdev = req->hdev;
1535
1536         if (lmp_bredr_capable(hdev))
1537                 bredr_setup(req);
1538         else
1539                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1540
1541         if (lmp_le_capable(hdev))
1542                 le_setup(req);
1543
1544         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545          * local supported commands HCI command.
1546          */
1547         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1549
1550         if (lmp_ssp_capable(hdev)) {
1551                 /* When SSP is available, then the host features page
1552                  * should also be available as well. However some
1553                  * controllers list the max_page as 0 as long as SSP
1554                  * has not been enabled. To achieve proper debugging
1555                  * output, force the minimum max_page to 1 at least.
1556                  */
1557                 hdev->max_page = 0x01;
1558
1559                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560                         u8 mode = 0x01;
1561                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562                                     sizeof(mode), &mode);
1563                 } else {
1564                         struct hci_cp_write_eir cp;
1565
1566                         memset(hdev->eir, 0, sizeof(hdev->eir));
1567                         memset(&cp, 0, sizeof(cp));
1568
1569                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1570                 }
1571         }
1572
1573         if (lmp_inq_rssi_capable(hdev))
1574                 hci_setup_inquiry_mode(req);
1575
1576         if (lmp_inq_tx_pwr_capable(hdev))
1577                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1578
1579         if (lmp_ext_feat_capable(hdev)) {
1580                 struct hci_cp_read_local_ext_features cp;
1581
1582                 cp.page = 0x01;
1583                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584                             sizeof(cp), &cp);
1585         }
1586
1587         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588                 u8 enable = 1;
1589                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590                             &enable);
1591         }
1592 }
1593
1594 static void hci_setup_link_policy(struct hci_request *req)
1595 {
1596         struct hci_dev *hdev = req->hdev;
1597         struct hci_cp_write_def_link_policy cp;
1598         u16 link_policy = 0;
1599
1600         if (lmp_rswitch_capable(hdev))
1601                 link_policy |= HCI_LP_RSWITCH;
1602         if (lmp_hold_capable(hdev))
1603                 link_policy |= HCI_LP_HOLD;
1604         if (lmp_sniff_capable(hdev))
1605                 link_policy |= HCI_LP_SNIFF;
1606         if (lmp_park_capable(hdev))
1607                 link_policy |= HCI_LP_PARK;
1608
1609         cp.policy = cpu_to_le16(link_policy);
1610         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1611 }
1612
1613 static void hci_set_le_support(struct hci_request *req)
1614 {
1615         struct hci_dev *hdev = req->hdev;
1616         struct hci_cp_write_le_host_supported cp;
1617
1618         /* LE-only devices do not support explicit enablement */
1619         if (!lmp_bredr_capable(hdev))
1620                 return;
1621
1622         memset(&cp, 0, sizeof(cp));
1623
1624         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625                 cp.le = 0x01;
1626                 cp.simul = 0x00;
1627         }
1628
1629         if (cp.le != lmp_host_le_capable(hdev))
1630                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631                             &cp);
1632 }
1633
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1635 {
1636         struct hci_dev *hdev = req->hdev;
1637         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638
1639         /* If Connectionless Slave Broadcast master role is supported
1640          * enable all necessary events for it.
1641          */
1642         if (lmp_csb_master_capable(hdev)) {
1643                 events[1] |= 0x40;      /* Triggered Clock Capture */
1644                 events[1] |= 0x80;      /* Synchronization Train Complete */
1645                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1646                 events[2] |= 0x20;      /* CSB Channel Map Change */
1647         }
1648
1649         /* If Connectionless Slave Broadcast slave role is supported
1650          * enable all necessary events for it.
1651          */
1652         if (lmp_csb_slave_capable(hdev)) {
1653                 events[2] |= 0x01;      /* Synchronization Train Received */
1654                 events[2] |= 0x02;      /* CSB Receive */
1655                 events[2] |= 0x04;      /* CSB Timeout */
1656                 events[2] |= 0x08;      /* Truncated Page Complete */
1657         }
1658
1659         /* Enable Authenticated Payload Timeout Expired event if supported */
1660         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1661                 events[2] |= 0x80;
1662
1663         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1664 }
1665
1666 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1667 {
1668         struct hci_dev *hdev = req->hdev;
1669         u8 p;
1670
1671         hci_setup_event_mask(req);
1672
1673         /* Some Broadcom based Bluetooth controllers do not support the
1674          * Delete Stored Link Key command. They are clearly indicating its
1675          * absence in the bit mask of supported commands.
1676          *
1677          * Check the supported commands and only if the the command is marked
1678          * as supported send it. If not supported assume that the controller
1679          * does not have actual support for stored link keys which makes this
1680          * command redundant anyway.
1681          *
1682          * Some controllers indicate that they support handling deleting
1683          * stored link keys, but they don't. The quirk lets a driver
1684          * just disable this command.
1685          */
1686         if (hdev->commands[6] & 0x80 &&
1687             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1688                 struct hci_cp_delete_stored_link_key cp;
1689
1690                 bacpy(&cp.bdaddr, BDADDR_ANY);
1691                 cp.delete_all = 0x01;
1692                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693                             sizeof(cp), &cp);
1694         }
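
        /* Hedged sketch, not part of this flow: a transport driver that
         * knows its controller wrongly advertises this command could opt
         * out before registering the device, e.g. from its probe path:
         *
         *      set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
         */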
1695
1696         if (hdev->commands[5] & 0x10)
1697                 hci_setup_link_policy(req);
1698
1699         if (lmp_le_capable(hdev)) {
1700                 u8 events[8];
1701
1702                 memset(events, 0, sizeof(events));
1703                 events[0] = 0x0f;
1704
1705                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706                         events[0] |= 0x10;      /* LE Long Term Key Request */
1707
1708                 /* If controller supports the Connection Parameters Request
1709                  * Link Layer Procedure, enable the corresponding event.
1710                  */
1711                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712                         events[0] |= 0x20;      /* LE Remote Connection
1713                                                  * Parameter Request
1714                                                  */
1715
1716                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717                             events);
1718
1719                 if (hdev->commands[25] & 0x40) {
1720                         /* Read LE Advertising Channel TX Power */
1721                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722                 }
1723
1724                 hci_set_le_support(req);
1725         }
1726
1727         /* Read features beyond page 1 if available */
1728         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729                 struct hci_cp_read_local_ext_features cp;
1730
1731                 cp.page = p;
1732                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733                             sizeof(cp), &cp);
1734         }
1735 }
1736
1737 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1738 {
1739         struct hci_dev *hdev = req->hdev;
1740
1741         /* Set event mask page 2 if the HCI command for it is supported */
1742         if (hdev->commands[22] & 0x04)
1743                 hci_set_event_mask_page_2(req);
1744
1745         /* Read local codec list if the HCI command is supported */
1746         if (hdev->commands[29] & 0x20)
1747                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748
1749         /* Get MWS transport configuration if the HCI command is supported */
1750         if (hdev->commands[30] & 0x08)
1751                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752
1753         /* Check for Synchronization Train support */
1754         if (lmp_sync_train_capable(hdev))
1755                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1756
1757         /* Enable Secure Connections if supported and configured */
1758         if ((lmp_sc_capable(hdev) ||
1759              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1760             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761                 u8 support = 0x01;
1762                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763                             sizeof(support), &support);
1764         }
1765 }
1766
1767 static int __hci_init(struct hci_dev *hdev)
1768 {
1769         int err;
1770
1771         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772         if (err < 0)
1773                 return err;
1774
1775         /* The Device Under Test (DUT) mode is special and available for
1776          * all controller types. So just create it early on.
1777          */
1778         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780                                     &dut_mode_fops);
1781         }
1782
1783         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1784          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1785          * first stage init.
1786          */
1787         if (hdev->dev_type != HCI_BREDR)
1788                 return 0;
1789
1790         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791         if (err < 0)
1792                 return err;
1793
1794         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799         if (err < 0)
1800                 return err;
1801
1802         /* Only create debugfs entries during the initial setup
1803          * phase and not every time the controller gets powered on.
1804          */
1805         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806                 return 0;
1807
1808         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809                             &features_fops);
1810         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811                            &hdev->manufacturer);
1812         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815                             &blacklist_fops);
1816         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817                             &whitelist_fops);
1818         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819
1820         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821                             &conn_info_min_age_fops);
1822         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823                             &conn_info_max_age_fops);
1824
1825         if (lmp_bredr_capable(hdev)) {
1826                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827                                     hdev, &inquiry_cache_fops);
1828                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829                                     hdev, &link_keys_fops);
1830                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831                                     hdev, &dev_class_fops);
1832                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833                                     hdev, &voice_setting_fops);
1834         }
1835
1836         if (lmp_ssp_capable(hdev)) {
1837                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838                                     hdev, &auto_accept_delay_fops);
1839                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840                                     hdev, &force_sc_support_fops);
1841                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842                                     hdev, &sc_only_mode_fops);
1843         }
1844
1845         if (lmp_sniff_capable(hdev)) {
1846                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847                                     hdev, &idle_timeout_fops);
1848                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849                                     hdev, &sniff_min_interval_fops);
1850                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851                                     hdev, &sniff_max_interval_fops);
1852         }
1853
1854         if (lmp_le_capable(hdev)) {
1855                 debugfs_create_file("identity", 0400, hdev->debugfs,
1856                                     hdev, &identity_fops);
1857                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858                                     hdev, &rpa_timeout_fops);
1859                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860                                     hdev, &random_address_fops);
1861                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862                                     hdev, &static_address_fops);
1863
1864                 /* For controllers with a public address, provide a debug
1865                  * option to force the usage of the configured static
1866                  * address. By default the public address is used.
1867                  */
1868                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869                         debugfs_create_file("force_static_address", 0644,
1870                                             hdev->debugfs, hdev,
1871                                             &force_static_address_fops);
1872
1873                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874                                   &hdev->le_white_list_size);
1875                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876                                     &white_list_fops);
1877                 debugfs_create_file("identity_resolving_keys", 0400,
1878                                     hdev->debugfs, hdev,
1879                                     &identity_resolving_keys_fops);
1880                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881                                     hdev, &long_term_keys_fops);
1882                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883                                     hdev, &conn_min_interval_fops);
1884                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885                                     hdev, &conn_max_interval_fops);
1886                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887                                     hdev, &conn_latency_fops);
1888                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889                                     hdev, &supervision_timeout_fops);
1890                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891                                     hdev, &adv_channel_map_fops);
1892                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893                                     hdev, &adv_min_interval_fops);
1894                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895                                     hdev, &adv_max_interval_fops);
1896                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897                                     &device_list_fops);
1898                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899                                    hdev->debugfs,
1900                                    &hdev->discov_interleaved_timeout);
1901         }
1902
1903         return 0;
1904 }
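
/* Hedged usage note: with debugfs mounted in its usual place, the entries
 * created above appear under the per-controller directory, for example:
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/features
 *      cat /sys/kernel/debug/bluetooth/hci0/conn_info_min_age
 */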
1905
1906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1907 {
1908         struct hci_dev *hdev = req->hdev;
1909
1910         BT_DBG("%s %ld", hdev->name, opt);
1911
1912         /* Reset */
1913         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1914                 hci_reset_req(req, 0);
1915
1916         /* Read Local Version */
1917         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1918
1919         /* Read BD Address */
1920         if (hdev->set_bdaddr)
1921                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1922 }
1923
1924 static int __hci_unconf_init(struct hci_dev *hdev)
1925 {
1926         int err;
1927
1928         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1929                 return 0;
1930
1931         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1932         if (err < 0)
1933                 return err;
1934
1935         return 0;
1936 }
1937
1938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1939 {
1940         __u8 scan = opt;
1941
1942         BT_DBG("%s %x", req->hdev->name, scan);
1943
1944         /* Inquiry and Page scans */
1945         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1946 }
1947
1948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1949 {
1950         __u8 auth = opt;
1951
1952         BT_DBG("%s %x", req->hdev->name, auth);
1953
1954         /* Authentication */
1955         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1956 }
1957
1958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1959 {
1960         __u8 encrypt = opt;
1961
1962         BT_DBG("%s %x", req->hdev->name, encrypt);
1963
1964         /* Encryption */
1965         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1966 }
1967
1968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1969 {
1970         __le16 policy = cpu_to_le16(opt);
1971
1972         BT_DBG("%s %x", req->hdev->name, policy);
1973
1974         /* Default link policy */
1975         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1976 }
1977
1978 /* Get HCI device by index.
1979  * Device is held on return. */
1980 struct hci_dev *hci_dev_get(int index)
1981 {
1982         struct hci_dev *hdev = NULL, *d;
1983
1984         BT_DBG("%d", index);
1985
1986         if (index < 0)
1987                 return NULL;
1988
1989         read_lock(&hci_dev_list_lock);
1990         list_for_each_entry(d, &hci_dev_list, list) {
1991                 if (d->id == index) {
1992                         hdev = hci_dev_hold(d);
1993                         break;
1994                 }
1995         }
1996         read_unlock(&hci_dev_list_lock);
1997         return hdev;
1998 }
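
/* Typical usage (sketch): the reference implicitly taken by hci_dev_get()
 * must be balanced with hci_dev_put() once the caller is done:
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              ... use hdev ...
 *              hci_dev_put(hdev);
 *      }
 */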
1999
2000 /* ---- Inquiry support ---- */
2001
2002 bool hci_discovery_active(struct hci_dev *hdev)
2003 {
2004         struct discovery_state *discov = &hdev->discovery;
2005
2006         switch (discov->state) {
2007         case DISCOVERY_FINDING:
2008         case DISCOVERY_RESOLVING:
2009                 return true;
2010
2011         default:
2012                 return false;
2013         }
2014 }
2015
2016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2017 {
2018         int old_state = hdev->discovery.state;
2019
2020         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2021
2022         if (old_state == state)
2023                 return;
2024
2025         hdev->discovery.state = state;
2026
2027         switch (state) {
2028         case DISCOVERY_STOPPED:
2029                 hci_update_background_scan(hdev);
2030
2031                 if (old_state != DISCOVERY_STARTING)
2032                         mgmt_discovering(hdev, 0);
2033                 break;
2034         case DISCOVERY_STARTING:
2035                 break;
2036         case DISCOVERY_FINDING:
2037                 mgmt_discovering(hdev, 1);
2038                 break;
2039         case DISCOVERY_RESOLVING:
2040                 break;
2041         case DISCOVERY_STOPPING:
2042                 break;
2043         }
2044 }
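
/* For reference, the nominal progression handled above:
 *
 *      STOPPED -> STARTING -> FINDING -> RESOLVING -> STOPPING -> STOPPED
 *
 * Only the externally visible edges are reported to mgmt: entering
 * FINDING (discovering = 1) and reaching STOPPED from any state other
 * than STARTING (discovering = 0).
 */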
2045
2046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2047 {
2048         struct discovery_state *cache = &hdev->discovery;
2049         struct inquiry_entry *p, *n;
2050
2051         list_for_each_entry_safe(p, n, &cache->all, all) {
2052                 list_del(&p->all);
2053                 kfree(p);
2054         }
2055
2056         INIT_LIST_HEAD(&cache->unknown);
2057         INIT_LIST_HEAD(&cache->resolve);
2058 }
2059
2060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2061                                                bdaddr_t *bdaddr)
2062 {
2063         struct discovery_state *cache = &hdev->discovery;
2064         struct inquiry_entry *e;
2065
2066         BT_DBG("cache %p, %pMR", cache, bdaddr);
2067
2068         list_for_each_entry(e, &cache->all, all) {
2069                 if (!bacmp(&e->data.bdaddr, bdaddr))
2070                         return e;
2071         }
2072
2073         return NULL;
2074 }
2075
2076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2077                                                        bdaddr_t *bdaddr)
2078 {
2079         struct discovery_state *cache = &hdev->discovery;
2080         struct inquiry_entry *e;
2081
2082         BT_DBG("cache %p, %pMR", cache, bdaddr);
2083
2084         list_for_each_entry(e, &cache->unknown, list) {
2085                 if (!bacmp(&e->data.bdaddr, bdaddr))
2086                         return e;
2087         }
2088
2089         return NULL;
2090 }
2091
2092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2093                                                        bdaddr_t *bdaddr,
2094                                                        int state)
2095 {
2096         struct discovery_state *cache = &hdev->discovery;
2097         struct inquiry_entry *e;
2098
2099         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2100
2101         list_for_each_entry(e, &cache->resolve, list) {
2102                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2103                         return e;
2104                 if (!bacmp(&e->data.bdaddr, bdaddr))
2105                         return e;
2106         }
2107
2108         return NULL;
2109 }
2110
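/* Re-insert @ie into the resolve list, which is kept sorted by ascending
 * absolute RSSI (strongest signal first) so that names are resolved for
 * the closest devices first. Entries whose name resolution is already
 * pending do not affect the choice of insertion point.
 */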
2111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2112                                       struct inquiry_entry *ie)
2113 {
2114         struct discovery_state *cache = &hdev->discovery;
2115         struct list_head *pos = &cache->resolve;
2116         struct inquiry_entry *p;
2117
2118         list_del(&ie->list);
2119
2120         list_for_each_entry(p, &cache->resolve, list) {
2121                 if (p->name_state != NAME_PENDING &&
2122                     abs(p->data.rssi) >= abs(ie->data.rssi))
2123                         break;
2124                 pos = &p->list;
2125         }
2126
2127         list_add(&ie->list, pos);
2128 }
2129
2130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2131                              bool name_known)
2132 {
2133         struct discovery_state *cache = &hdev->discovery;
2134         struct inquiry_entry *ie;
2135         u32 flags = 0;
2136
2137         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2138
2139         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2140
2141         if (!data->ssp_mode)
2142                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2143
2144         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2145         if (ie) {
2146                 if (!ie->data.ssp_mode)
2147                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2148
2149                 if (ie->name_state == NAME_NEEDED &&
2150                     data->rssi != ie->data.rssi) {
2151                         ie->data.rssi = data->rssi;
2152                         hci_inquiry_cache_update_resolve(hdev, ie);
2153                 }
2154
2155                 goto update;
2156         }
2157
2158         /* Entry not in the cache. Add new one. */
2159         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2160         if (!ie) {
2161                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2162                 goto done;
2163         }
2164
2165         list_add(&ie->all, &cache->all);
2166
2167         if (name_known) {
2168                 ie->name_state = NAME_KNOWN;
2169         } else {
2170                 ie->name_state = NAME_NOT_KNOWN;
2171                 list_add(&ie->list, &cache->unknown);
2172         }
2173
2174 update:
2175         if (name_known && ie->name_state != NAME_KNOWN &&
2176             ie->name_state != NAME_PENDING) {
2177                 ie->name_state = NAME_KNOWN;
2178                 list_del(&ie->list);
2179         }
2180
2181         memcpy(&ie->data, data, sizeof(*data));
2182         ie->timestamp = jiffies;
2183         cache->timestamp = jiffies;
2184
2185         if (ie->name_state == NAME_NOT_KNOWN)
2186                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2187
2188 done:
2189         return flags;
2190 }
2191
2192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2193 {
2194         struct discovery_state *cache = &hdev->discovery;
2195         struct inquiry_info *info = (struct inquiry_info *) buf;
2196         struct inquiry_entry *e;
2197         int copied = 0;
2198
2199         list_for_each_entry(e, &cache->all, all) {
2200                 struct inquiry_data *data = &e->data;
2201
2202                 if (copied >= num)
2203                         break;
2204
2205                 bacpy(&info->bdaddr, &data->bdaddr);
2206                 info->pscan_rep_mode    = data->pscan_rep_mode;
2207                 info->pscan_period_mode = data->pscan_period_mode;
2208                 info->pscan_mode        = data->pscan_mode;
2209                 memcpy(info->dev_class, data->dev_class, 3);
2210                 info->clock_offset      = data->clock_offset;
2211
2212                 info++;
2213                 copied++;
2214         }
2215
2216         BT_DBG("cache %p, copied %d", cache, copied);
2217         return copied;
2218 }
2219
2220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2221 {
2222         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2223         struct hci_dev *hdev = req->hdev;
2224         struct hci_cp_inquiry cp;
2225
2226         BT_DBG("%s", hdev->name);
2227
2228         if (test_bit(HCI_INQUIRY, &hdev->flags))
2229                 return;
2230
2231         /* Start Inquiry */
2232         memcpy(&cp.lap, &ir->lap, 3);
2233         cp.length  = ir->length;
2234         cp.num_rsp = ir->num_rsp;
2235         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2236 }
2237
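/* Action function for the wait_on_bit() call in hci_inquiry() below: it
 * is invoked each time the waiter wakes up, yields the CPU and reports
 * whether a signal interrupted the wait (a non-zero return value aborts
 * the wait).
 */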
2238 static int wait_inquiry(void *word)
2239 {
2240         schedule();
2241         return signal_pending(current);
2242 }
2243
2244 int hci_inquiry(void __user *arg)
2245 {
2246         __u8 __user *ptr = arg;
2247         struct hci_inquiry_req ir;
2248         struct hci_dev *hdev;
2249         int err = 0, do_inquiry = 0, max_rsp;
2250         long timeo;
2251         __u8 *buf;
2252
2253         if (copy_from_user(&ir, ptr, sizeof(ir)))
2254                 return -EFAULT;
2255
2256         hdev = hci_dev_get(ir.dev_id);
2257         if (!hdev)
2258                 return -ENODEV;
2259
2260         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2261                 err = -EBUSY;
2262                 goto done;
2263         }
2264
2265         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2266                 err = -EOPNOTSUPP;
2267                 goto done;
2268         }
2269
2270         if (hdev->dev_type != HCI_BREDR) {
2271                 err = -EOPNOTSUPP;
2272                 goto done;
2273         }
2274
2275         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2276                 err = -EOPNOTSUPP;
2277                 goto done;
2278         }
2279
2280         hci_dev_lock(hdev);
2281         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2282             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2283                 hci_inquiry_cache_flush(hdev);
2284                 do_inquiry = 1;
2285         }
2286         hci_dev_unlock(hdev);
2287
2288         timeo = ir.length * msecs_to_jiffies(2000);
2289
2290         if (do_inquiry) {
2291                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2292                                    timeo);
2293                 if (err < 0)
2294                         goto done;
2295
2296                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2297                  * cleared). If it is interrupted by a signal, return -EINTR.
2298                  */
2299                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2300                                 TASK_INTERRUPTIBLE))
2301                         return -EINTR;
2302         }
2303
2304         /* For an unlimited number of responses, use a buffer with
2305          * 255 entries.
2306          */
2307         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2308
2309         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2310          * buffer and then copy it to user space.
2311          */
2312         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2313         if (!buf) {
2314                 err = -ENOMEM;
2315                 goto done;
2316         }
2317
2318         hci_dev_lock(hdev);
2319         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2320         hci_dev_unlock(hdev);
2321
2322         BT_DBG("num_rsp %d", ir.num_rsp);
2323
2324         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2325                 ptr += sizeof(ir);
2326                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2327                                  ir.num_rsp))
2328                         err = -EFAULT;
2329         } else
2330                 err = -EFAULT;
2331
2332         kfree(buf);
2333
2334 done:
2335         hci_dev_put(hdev);
2336         return err;
2337 }
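
#if 0
/* Hedged userspace sketch (not built as part of this file): driving the
 * ioctl implemented above, assuming the usual BlueZ headers and an open
 * HCI control socket; the function name is hypothetical. The buffer
 * layout mirrors the kernel side: the request header is followed by room
 * for num_rsp inquiry_info records.
 */
static int example_inquiry(int ctl_sock, int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} req = {
		.ir = {
			.dev_id  = dev_id,
			.flags   = IREQ_CACHE_FLUSH,
			.lap     = { 0x33, 0x8b, 0x9e },  /* GIAC */
			.length  = 8,                     /* units of 1.28s */
			.num_rsp = 8,
		},
	};

	return ioctl(ctl_sock, HCIINQUIRY, &req);
}
#endif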
2338
2339 static int hci_dev_do_open(struct hci_dev *hdev)
2340 {
2341         int ret = 0;
2342
2343         BT_DBG("%s %p", hdev->name, hdev);
2344
2345         hci_req_lock(hdev);
2346
2347         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2348                 ret = -ENODEV;
2349                 goto done;
2350         }
2351
2352         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2353             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2354                 /* Check for rfkill but allow the HCI setup stage to
2355                  * proceed (which in itself doesn't cause any RF activity).
2356                  */
2357                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2358                         ret = -ERFKILL;
2359                         goto done;
2360                 }
2361
2362                 /* Check for valid public address or a configured static
2363                  * random address, but let the HCI setup proceed to
2364                  * be able to determine if there is a public address
2365                  * or not.
2366                  *
2367                  * In case of user channel usage, it is not important
2368                  * if a public address or static random address is
2369                  * available.
2370                  *
2371                  * This check is only valid for BR/EDR controllers
2372                  * since AMP controllers do not have an address.
2373                  */
2374                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2375                     hdev->dev_type == HCI_BREDR &&
2376                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2377                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2378                         ret = -EADDRNOTAVAIL;
2379                         goto done;
2380                 }
2381         }
2382
2383         if (test_bit(HCI_UP, &hdev->flags)) {
2384                 ret = -EALREADY;
2385                 goto done;
2386         }
2387
2388         if (hdev->open(hdev)) {
2389                 ret = -EIO;
2390                 goto done;
2391         }
2392
2393         atomic_set(&hdev->cmd_cnt, 1);
2394         set_bit(HCI_INIT, &hdev->flags);
2395
2396         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2397                 if (hdev->setup)
2398                         ret = hdev->setup(hdev);
2399
2400                 /* The transport driver can set these quirks before
2401                  * creating the HCI device or in its setup callback.
2402                  *
2403                  * In case any of them is set, the controller has to
2404                  * start up as unconfigured.
2405                  */
2406                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2407                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2408                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2409
2410                 /* For an unconfigured controller it is required to
2411                  * read at least the version information provided by
2412                  * the Read Local Version Information command.
2413                  *
2414                  * If the set_bdaddr driver callback is provided, then
2415                  * also the original Bluetooth public device address
2416                  * will be read using the Read BD Address command.
2417                  */
2418                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2419                         ret = __hci_unconf_init(hdev);
2420         }
2421
2422         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2423                 /* If public address change is configured, ensure that
2424                  * the address gets programmed. If the driver does not
2425                  * support changing the public address, fail the power
2426                  * on procedure.
2427                  */
2428                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2429                     hdev->set_bdaddr)
2430                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2431                 else
2432                         ret = -EADDRNOTAVAIL;
2433         }
2434
2435         if (!ret) {
2436                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2437                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2438                         ret = __hci_init(hdev);
2439         }
2440
2441         clear_bit(HCI_INIT, &hdev->flags);
2442
2443         if (!ret) {
2444                 hci_dev_hold(hdev);
2445                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2446                 set_bit(HCI_UP, &hdev->flags);
2447                 hci_notify(hdev, HCI_DEV_UP);
2448                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2449                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2450                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2451                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2452                     hdev->dev_type == HCI_BREDR) {
2453                         hci_dev_lock(hdev);
2454                         mgmt_powered(hdev, 1);
2455                         hci_dev_unlock(hdev);
2456                 }
2457         } else {
2458                 /* Init failed, cleanup */
2459                 flush_work(&hdev->tx_work);
2460                 flush_work(&hdev->cmd_work);
2461                 flush_work(&hdev->rx_work);
2462
2463                 skb_queue_purge(&hdev->cmd_q);
2464                 skb_queue_purge(&hdev->rx_q);
2465
2466                 if (hdev->flush)
2467                         hdev->flush(hdev);
2468
2469                 if (hdev->sent_cmd) {
2470                         kfree_skb(hdev->sent_cmd);
2471                         hdev->sent_cmd = NULL;
2472                 }
2473
2474                 hdev->close(hdev);
2475                 hdev->flags &= BIT(HCI_RAW);
2476         }
2477
2478 done:
2479         hci_req_unlock(hdev);
2480         return ret;
2481 }
2482
2483 /* ---- HCI ioctl helpers ---- */
2484
2485 int hci_dev_open(__u16 dev)
2486 {
2487         struct hci_dev *hdev;
2488         int err;
2489
2490         hdev = hci_dev_get(dev);
2491         if (!hdev)
2492                 return -ENODEV;
2493
2494         /* Devices that are marked as unconfigured can only be powered
2495          * up as user channel. Trying to bring them up as normal devices
2496          * will result in a failure. Only user channel operation is
2497          * possible.
2498          *
2499          * When this function is called for a user channel, the flag
2500          * HCI_USER_CHANNEL will be set first before attempting to
2501          * open the device.
2502          */
2503         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2504             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2505                 err = -EOPNOTSUPP;
2506                 goto done;
2507         }
2508
2509         /* We need to ensure that no other power on/off work is pending
2510          * before proceeding to call hci_dev_do_open. This is
2511          * particularly important if the setup procedure has not yet
2512          * completed.
2513          */
2514         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2515                 cancel_delayed_work(&hdev->power_off);
2516
2517         /* After this call it is guaranteed that the setup procedure
2518          * has finished. This means that error conditions like RFKILL
2519          * or no valid public or static random address apply.
2520          */
2521         flush_workqueue(hdev->req_workqueue);
2522
2523         /* For controllers not using the management interface and that
2524          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2525          * so that pairing works for them. Once the management interface
2526          * is in use this bit will be cleared again and userspace has
2527          * to explicitly enable it.
2528          */
2529         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2530             !test_bit(HCI_MGMT, &hdev->dev_flags))
2531                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2532
2533         err = hci_dev_do_open(hdev);
2534
2535 done:
2536         hci_dev_put(hdev);
2537         return err;
2538 }
2539
2540 /* This function requires the caller holds hdev->lock */
2541 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2542 {
2543         struct hci_conn_params *p;
2544
2545         list_for_each_entry(p, &hdev->le_conn_params, list)
2546                 list_del_init(&p->action);
2547
2548         BT_DBG("All LE pending actions cleared");
2549 }
2550
2551 static int hci_dev_do_close(struct hci_dev *hdev)
2552 {
2553         BT_DBG("%s %p", hdev->name, hdev);
2554
2555         cancel_delayed_work(&hdev->power_off);
2556
2557         hci_req_cancel(hdev, ENODEV);
2558         hci_req_lock(hdev);
2559
2560         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2561                 cancel_delayed_work_sync(&hdev->cmd_timer);
2562                 hci_req_unlock(hdev);
2563                 return 0;
2564         }
2565
2566         /* Flush RX and TX works */
2567         flush_work(&hdev->tx_work);
2568         flush_work(&hdev->rx_work);
2569
2570         if (hdev->discov_timeout > 0) {
2571                 cancel_delayed_work(&hdev->discov_off);
2572                 hdev->discov_timeout = 0;
2573                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2574                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2575         }
2576
2577         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2578                 cancel_delayed_work(&hdev->service_cache);
2579
2580         cancel_delayed_work_sync(&hdev->le_scan_disable);
2581
2582         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2583                 cancel_delayed_work_sync(&hdev->rpa_expired);
2584
2585         hci_dev_lock(hdev);
2586         hci_inquiry_cache_flush(hdev);
2587         hci_conn_hash_flush(hdev);
2588         hci_pend_le_actions_clear(hdev);
2589         hci_dev_unlock(hdev);
2590
2591         hci_notify(hdev, HCI_DEV_DOWN);
2592
2593         if (hdev->flush)
2594                 hdev->flush(hdev);
2595
2596         /* Reset device */
2597         skb_queue_purge(&hdev->cmd_q);
2598         atomic_set(&hdev->cmd_cnt, 1);
2599         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2600             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2601             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2602                 set_bit(HCI_INIT, &hdev->flags);
2603                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2604                 clear_bit(HCI_INIT, &hdev->flags);
2605         }
2606
2607         /* Flush command work */
2608         flush_work(&hdev->cmd_work);
2609
2610         /* Drop queues */
2611         skb_queue_purge(&hdev->rx_q);
2612         skb_queue_purge(&hdev->cmd_q);
2613         skb_queue_purge(&hdev->raw_q);
2614
2615         /* Drop last sent command */
2616         if (hdev->sent_cmd) {
2617                 cancel_delayed_work_sync(&hdev->cmd_timer);
2618                 kfree_skb(hdev->sent_cmd);
2619                 hdev->sent_cmd = NULL;
2620         }
2621
2622         kfree_skb(hdev->recv_evt);
2623         hdev->recv_evt = NULL;
2624
2625         /* After this point our queues are empty
2626          * and no tasks are scheduled. */
2627         hdev->close(hdev);
2628
2629         /* Clear flags */
2630         hdev->flags &= BIT(HCI_RAW);
2631         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2632
2633         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2634                 if (hdev->dev_type == HCI_BREDR) {
2635                         hci_dev_lock(hdev);
2636                         mgmt_powered(hdev, 0);
2637                         hci_dev_unlock(hdev);
2638                 }
2639         }
2640
2641         /* Controller radio is available but is currently powered down */
2642         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2643
2644         memset(hdev->eir, 0, sizeof(hdev->eir));
2645         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2646         bacpy(&hdev->random_addr, BDADDR_ANY);
2647
2648         hci_req_unlock(hdev);
2649
2650         hci_dev_put(hdev);
2651         return 0;
2652 }
2653
2654 int hci_dev_close(__u16 dev)
2655 {
2656         struct hci_dev *hdev;
2657         int err;
2658
2659         hdev = hci_dev_get(dev);
2660         if (!hdev)
2661                 return -ENODEV;
2662
2663         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2664                 err = -EBUSY;
2665                 goto done;
2666         }
2667
2668         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2669                 cancel_delayed_work(&hdev->power_off);
2670
2671         err = hci_dev_do_close(hdev);
2672
2673 done:
2674         hci_dev_put(hdev);
2675         return err;
2676 }
2677
2678 int hci_dev_reset(__u16 dev)
2679 {
2680         struct hci_dev *hdev;
2681         int ret = 0;
2682
2683         hdev = hci_dev_get(dev);
2684         if (!hdev)
2685                 return -ENODEV;
2686
2687         hci_req_lock(hdev);
2688
2689         if (!test_bit(HCI_UP, &hdev->flags)) {
2690                 ret = -ENETDOWN;
2691                 goto done;
2692         }
2693
2694         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2695                 ret = -EBUSY;
2696                 goto done;
2697         }
2698
2699         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2700                 ret = -EOPNOTSUPP;
2701                 goto done;
2702         }
2703
2704         /* Drop queues */
2705         skb_queue_purge(&hdev->rx_q);
2706         skb_queue_purge(&hdev->cmd_q);
2707
2708         hci_dev_lock(hdev);
2709         hci_inquiry_cache_flush(hdev);
2710         hci_conn_hash_flush(hdev);
2711         hci_dev_unlock(hdev);
2712
2713         if (hdev->flush)
2714                 hdev->flush(hdev);
2715
2716         atomic_set(&hdev->cmd_cnt, 1);
2717         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2718
2719         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2720
2721 done:
2722         hci_req_unlock(hdev);
2723         hci_dev_put(hdev);
2724         return ret;
2725 }
2726
2727 int hci_dev_reset_stat(__u16 dev)
2728 {
2729         struct hci_dev *hdev;
2730         int ret = 0;
2731
2732         hdev = hci_dev_get(dev);
2733         if (!hdev)
2734                 return -ENODEV;
2735
2736         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2737                 ret = -EBUSY;
2738                 goto done;
2739         }
2740
2741         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2742                 ret = -EOPNOTSUPP;
2743                 goto done;
2744         }
2745
2746         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2747
2748 done:
2749         hci_dev_put(hdev);
2750         return ret;
2751 }
2752
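/* Called after a successful HCISETSCAN to mirror the legacy scan value in
 * the mgmt-visible flags: SCAN_PAGE (0x02) drives HCI_CONNECTABLE and
 * SCAN_INQUIRY (0x01) drives HCI_DISCOVERABLE.
 */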
2753 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2754 {
2755         bool conn_changed, discov_changed;
2756
2757         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2758
2759         if ((scan & SCAN_PAGE))
2760                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2761                                                  &hdev->dev_flags);
2762         else
2763                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2764                                                   &hdev->dev_flags);
2765
2766         if ((scan & SCAN_INQUIRY)) {
2767                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2768                                                    &hdev->dev_flags);
2769         } else {
2770                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2771                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2772                                                     &hdev->dev_flags);
2773         }
2774
2775         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2776                 return;
2777
2778         if (conn_changed || discov_changed) {
2779                 /* In case this was disabled through mgmt */
2780                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2781
2782                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2783                         mgmt_update_adv_data(hdev);
2784
2785                 mgmt_new_settings(hdev);
2786         }
2787 }
2788
2789 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2790 {
2791         struct hci_dev *hdev;
2792         struct hci_dev_req dr;
2793         int err = 0;
2794
2795         if (copy_from_user(&dr, arg, sizeof(dr)))
2796                 return -EFAULT;
2797
2798         hdev = hci_dev_get(dr.dev_id);
2799         if (!hdev)
2800                 return -ENODEV;
2801
2802         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2803                 err = -EBUSY;
2804                 goto done;
2805         }
2806
2807         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2808                 err = -EOPNOTSUPP;
2809                 goto done;
2810         }
2811
2812         if (hdev->dev_type != HCI_BREDR) {
2813                 err = -EOPNOTSUPP;
2814                 goto done;
2815         }
2816
2817         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2818                 err = -EOPNOTSUPP;
2819                 goto done;
2820         }
2821
2822         switch (cmd) {
2823         case HCISETAUTH:
2824                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2825                                    HCI_INIT_TIMEOUT);
2826                 break;
2827
2828         case HCISETENCRYPT:
2829                 if (!lmp_encrypt_capable(hdev)) {
2830                         err = -EOPNOTSUPP;
2831                         break;
2832                 }
2833
2834                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2835                         /* Auth must be enabled first */
2836                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2837                                            HCI_INIT_TIMEOUT);
2838                         if (err)
2839                                 break;
2840                 }
2841
2842                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2843                                    HCI_INIT_TIMEOUT);
2844                 break;
2845
2846         case HCISETSCAN:
2847                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2848                                    HCI_INIT_TIMEOUT);
2849
2850                 /* Ensure that the connectable and discoverable states
2851                  * get correctly modified as this was a non-mgmt change.
2852                  */
2853                 if (!err)
2854                         hci_update_scan_state(hdev, dr.dev_opt);
2855                 break;
2856
2857         case HCISETLINKPOL:
2858                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2859                                    HCI_INIT_TIMEOUT);
2860                 break;
2861
2862         case HCISETLINKMODE:
2863                 hdev->link_mode = ((__u16) dr.dev_opt) &
2864                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2865                 break;
2866
2867         case HCISETPTYPE:
2868                 hdev->pkt_type = (__u16) dr.dev_opt;
2869                 break;
2870
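        /* For the two MTU cases below, dev_opt carries two packed 16-bit
         * values as laid out in host memory: the first holds the packet
         * count, the second the MTU.
         */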
2871         case HCISETACLMTU:
2872                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2873                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2874                 break;
2875
2876         case HCISETSCOMTU:
2877                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2878                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2879                 break;
2880
2881         default:
2882                 err = -EINVAL;
2883                 break;
2884         }
2885
2886 done:
2887         hci_dev_put(hdev);
2888         return err;
2889 }
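
#if 0
/* Hedged userspace sketch (not built as part of this file): enabling page
 * and inquiry scan through the HCISETSCAN ioctl handled above, assuming
 * an open HCI control socket; the function name is hypothetical.
 */
static int example_set_scan(int ctl_sock, int dev_id)
{
	struct hci_dev_req dr = {
		.dev_id  = dev_id,
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
	};

	return ioctl(ctl_sock, HCISETSCAN, &dr);
}
#endif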
2890
2891 int hci_get_dev_list(void __user *arg)
2892 {
2893         struct hci_dev *hdev;
2894         struct hci_dev_list_req *dl;
2895         struct hci_dev_req *dr;
2896         int n = 0, size, err;
2897         __u16 dev_num;
2898
2899         if (get_user(dev_num, (__u16 __user *) arg))
2900                 return -EFAULT;
2901
2902         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2903                 return -EINVAL;
2904
2905         size = sizeof(*dl) + dev_num * sizeof(*dr);
2906
2907         dl = kzalloc(size, GFP_KERNEL);
2908         if (!dl)
2909                 return -ENOMEM;
2910
2911         dr = dl->dev_req;
2912
2913         read_lock(&hci_dev_list_lock);
2914         list_for_each_entry(hdev, &hci_dev_list, list) {
2915                 unsigned long flags = hdev->flags;
2916
2917                 /* When the auto-off is configured, it means the transport
2918                  * is running, but in that case still indicate that the
2919                  * device is actually down.
2920                  */
2921                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2922                         flags &= ~BIT(HCI_UP);
2923
2924                 (dr + n)->dev_id  = hdev->id;
2925                 (dr + n)->dev_opt = flags;
2926
2927                 if (++n >= dev_num)
2928                         break;
2929         }
2930         read_unlock(&hci_dev_list_lock);
2931
2932         dl->dev_num = n;
2933         size = sizeof(*dl) + n * sizeof(*dr);
2934
2935         err = copy_to_user(arg, dl, size);
2936         kfree(dl);
2937
2938         return err ? -EFAULT : 0;
2939 }
2940
2941 int hci_get_dev_info(void __user *arg)
2942 {
2943         struct hci_dev *hdev;
2944         struct hci_dev_info di;
2945         unsigned long flags;
2946         int err = 0;
2947
2948         if (copy_from_user(&di, arg, sizeof(di)))
2949                 return -EFAULT;
2950
2951         hdev = hci_dev_get(di.dev_id);
2952         if (!hdev)
2953                 return -ENODEV;
2954
2955         /* When the auto-off is configured, it means the transport
2956          * is running, but in that case still indicate that the
2957          * device is actually down.
2958          */
2959         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2960                 flags = hdev->flags & ~BIT(HCI_UP);
2961         else
2962                 flags = hdev->flags;
2963
2964         strcpy(di.name, hdev->name);
2965         di.bdaddr   = hdev->bdaddr;
2966         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2967         di.flags    = flags;
2968         di.pkt_type = hdev->pkt_type;
2969         if (lmp_bredr_capable(hdev)) {
2970                 di.acl_mtu  = hdev->acl_mtu;
2971                 di.acl_pkts = hdev->acl_pkts;
2972                 di.sco_mtu  = hdev->sco_mtu;
2973                 di.sco_pkts = hdev->sco_pkts;
2974         } else {
2975                 di.acl_mtu  = hdev->le_mtu;
2976                 di.acl_pkts = hdev->le_pkts;
2977                 di.sco_mtu  = 0;
2978                 di.sco_pkts = 0;
2979         }
2980         di.link_policy = hdev->link_policy;
2981         di.link_mode   = hdev->link_mode;
2982
2983         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2984         memcpy(&di.features, &hdev->features, sizeof(di.features));
2985
2986         if (copy_to_user(arg, &di, sizeof(di)))
2987                 err = -EFAULT;
2988
2989         hci_dev_put(hdev);
2990
2991         return err;
2992 }
2993
2994 /* ---- Interface to HCI drivers ---- */
2995
2996 static int hci_rfkill_set_block(void *data, bool blocked)
2997 {
2998         struct hci_dev *hdev = data;
2999
3000         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3001
3002         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3003                 return -EBUSY;
3004
3005         if (blocked) {
3006                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3007                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3008                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3009                         hci_dev_do_close(hdev);
3010         } else {
3011                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3012         }
3013
3014         return 0;
3015 }
3016
3017 static const struct rfkill_ops hci_rfkill_ops = {
3018         .set_block = hci_rfkill_set_block,
3019 };
3020
3021 static void hci_power_on(struct work_struct *work)
3022 {
3023         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3024         int err;
3025
3026         BT_DBG("%s", hdev->name);
3027
3028         err = hci_dev_do_open(hdev);
3029         if (err < 0) {
3030                 mgmt_set_powered_failed(hdev, err);
3031                 return;
3032         }
3033
3034         /* During the HCI setup phase, a few error conditions are
3035          * ignored and they need to be checked now. If they are still
3036          * valid, it is important to turn the device back off.
3037          */
3038         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3039             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3040             (hdev->dev_type == HCI_BREDR &&
3041              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3042              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3043                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3044                 hci_dev_do_close(hdev);
3045         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3046                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3047                                    HCI_AUTO_OFF_TIMEOUT);
3048         }
3049
3050         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3051                 /* For unconfigured devices, set the HCI_RAW flag
3052                  * so that userspace can easily identify them.
3053                  */
3054                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3055                         set_bit(HCI_RAW, &hdev->flags);
3056
3057                 /* For fully configured devices, this will send
3058                  * the Index Added event. For unconfigured devices,
3059                  * it will send the Unconfigured Index Added event.
3060                  *
3061                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3062                  * and no event will be sent.
3063                  */
3064                 mgmt_index_added(hdev);
3065         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3066                 /* Now that the controller is configured, it is
3067                  * important to clear the HCI_RAW flag.
3068                  */
3069                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3070                         clear_bit(HCI_RAW, &hdev->flags);
3071
3072                 /* Powering on the controller with HCI_CONFIG set only
3073                  * happens with the transition from unconfigured to
3074                  * configured. This will send the Index Added event.
3075                  */
3076                 mgmt_index_added(hdev);
3077         }
3078 }
3079
3080 static void hci_power_off(struct work_struct *work)
3081 {
3082         struct hci_dev *hdev = container_of(work, struct hci_dev,
3083                                             power_off.work);
3084
3085         BT_DBG("%s", hdev->name);
3086
3087         hci_dev_do_close(hdev);
3088 }
3089
3090 static void hci_discov_off(struct work_struct *work)
3091 {
3092         struct hci_dev *hdev;
3093
3094         hdev = container_of(work, struct hci_dev, discov_off.work);
3095
3096         BT_DBG("%s", hdev->name);
3097
3098         mgmt_discoverable_timeout(hdev);
3099 }
3100
3101 void hci_uuids_clear(struct hci_dev *hdev)
3102 {
3103         struct bt_uuid *uuid, *tmp;
3104
3105         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3106                 list_del(&uuid->list);
3107                 kfree(uuid);
3108         }
3109 }
3110
3111 void hci_link_keys_clear(struct hci_dev *hdev)
3112 {
3113         struct list_head *p, *n;
3114
3115         list_for_each_safe(p, n, &hdev->link_keys) {
3116                 struct link_key *key;
3117
3118                 key = list_entry(p, struct link_key, list);
3119
3120                 list_del(p);
3121                 kfree(key);
3122         }
3123 }
3124
3125 void hci_smp_ltks_clear(struct hci_dev *hdev)
3126 {
3127         struct smp_ltk *k, *tmp;
3128
3129         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3130                 list_del(&k->list);
3131                 kfree(k);
3132         }
3133 }
3134
3135 void hci_smp_irks_clear(struct hci_dev *hdev)
3136 {
3137         struct smp_irk *k, *tmp;
3138
3139         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3140                 list_del(&k->list);
3141                 kfree(k);
3142         }
3143 }
3144
3145 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3146 {
3147         struct link_key *k;
3148
3149         list_for_each_entry(k, &hdev->link_keys, list)
3150                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3151                         return k;
3152
3153         return NULL;
3154 }
3155
3156 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3157                                u8 key_type, u8 old_key_type)
3158 {
3159         /* Legacy key */
3160         if (key_type < 0x03)
3161                 return true;
3162
3163         /* Debug keys are insecure so don't store them persistently */
3164         if (key_type == HCI_LK_DEBUG_COMBINATION)
3165                 return false;
3166
3167         /* Changed combination key and there's no previous one */
3168         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3169                 return false;
3170
3171         /* Security mode 3 case */
3172         if (!conn)
3173                 return true;
3174
3175         /* Neither the local nor the remote side requested no-bonding */
3176         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3177                 return true;
3178
3179         /* Local side had dedicated bonding as requirement */
3180         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3181                 return true;
3182
3183         /* Remote side had dedicated bonding as requirement */
3184         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3185                 return true;
3186
3187         /* If none of the above criteria match, then don't store the key
3188          * persistently */
3189         return false;
3190 }
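
/* Worked example of the rules above: an unauthenticated SSP combination
 * key (type 0x04) negotiated while both sides requested "no bonding"
 * (auth_type and remote_auth <= 0x01) falls through every check and is
 * therefore not stored persistently.
 */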
3191
3192 static u8 ltk_role(u8 type)
3193 {
3194         if (type == SMP_LTK)
3195                 return HCI_ROLE_MASTER;
3196
3197         return HCI_ROLE_SLAVE;
3198 }
3199
3200 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3201                              u8 role)
3202 {
3203         struct smp_ltk *k;
3204
3205         list_for_each_entry(k, &hdev->long_term_keys, list) {
3206                 if (k->ediv != ediv || k->rand != rand)
3207                         continue;
3208
3209                 if (ltk_role(k->type) != role)
3210                         continue;
3211
3212                 return k;
3213         }
3214
3215         return NULL;
3216 }
3217
3218 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3219                                      u8 addr_type, u8 role)
3220 {
3221         struct smp_ltk *k;
3222
3223         list_for_each_entry(k, &hdev->long_term_keys, list)
3224                 if (addr_type == k->bdaddr_type &&
3225                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3226                     ltk_role(k->type) == role)
3227                         return k;
3228
3229         return NULL;
3230 }
3231
3232 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3233 {
3234         struct smp_irk *irk;
3235
3236         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3237                 if (!bacmp(&irk->rpa, rpa))
3238                         return irk;
3239         }
3240
3241         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3242                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3243                         bacpy(&irk->rpa, rpa);
3244                         return irk;
3245                 }
3246         }
3247
3248         return NULL;
3249 }
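
/*
 * Usage sketch (illustrative): event handlers resolve the random
 * address from an advertising report back to a bonded identity, e.g.:
 *
 *	struct smp_irk *irk = hci_find_irk_by_rpa(hdev, &rpa);
 *	if (irk)
 *		// peer identity is irk->bdaddr / irk->addr_type
 *
 * The first pass above is a cheap hit on the cached last-seen RPA; the
 * second pass runs the AES-based resolution and refreshes that cache.
 */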
3250
3251 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3252                                      u8 addr_type)
3253 {
3254         struct smp_irk *irk;
3255
3256         /* Identity Address must be public or static random */
3257         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3258                 return NULL;
3259
3260         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3261                 if (addr_type == irk->addr_type &&
3262                     bacmp(bdaddr, &irk->bdaddr) == 0)
3263                         return irk;
3264         }
3265
3266         return NULL;
3267 }
3268
3269 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3270                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3271                                   u8 pin_len, bool *persistent)
3272 {
3273         struct link_key *key, *old_key;
3274         u8 old_key_type;
3275
3276         old_key = hci_find_link_key(hdev, bdaddr);
3277         if (old_key) {
3278                 old_key_type = old_key->type;
3279                 key = old_key;
3280         } else {
3281                 old_key_type = conn ? conn->key_type : 0xff;
3282                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3283                 if (!key)
3284                         return NULL;
3285                 list_add(&key->list, &hdev->link_keys);
3286         }
3287
3288         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3289
3290         /* Some buggy controller combinations generate a changed
3291          * combination key for legacy pairing even when there's no
3292          * previous key */
3293         if (type == HCI_LK_CHANGED_COMBINATION &&
3294             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3295                 type = HCI_LK_COMBINATION;
3296                 if (conn)
3297                         conn->key_type = type;
3298         }
3299
3300         bacpy(&key->bdaddr, bdaddr);
3301         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3302         key->pin_len = pin_len;
3303
3304         if (type == HCI_LK_CHANGED_COMBINATION)
3305                 key->type = old_key_type;
3306         else
3307                 key->type = type;
3308
3309         if (persistent)
3310                 *persistent = hci_persistent_key(hdev, conn, type,
3311                                                  old_key_type);
3312
3313         return key;
3314 }
3315
3316 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3317                             u8 addr_type, u8 type, u8 authenticated,
3318                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3319 {
3320         struct smp_ltk *key, *old_key;
3321         u8 role = ltk_role(type);
3322
3323         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3324         if (old_key)
3325                 key = old_key;
3326         else {
3327                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3328                 if (!key)
3329                         return NULL;
3330                 list_add(&key->list, &hdev->long_term_keys);
3331         }
3332
3333         bacpy(&key->bdaddr, bdaddr);
3334         key->bdaddr_type = addr_type;
3335         memcpy(key->val, tk, sizeof(key->val));
3336         key->authenticated = authenticated;
3337         key->ediv = ediv;
3338         key->rand = rand;
3339         key->enc_size = enc_size;
3340         key->type = type;
3341
3342         return key;
3343 }
3344
3345 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3346                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3347 {
3348         struct smp_irk *irk;
3349
3350         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3351         if (!irk) {
3352                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3353                 if (!irk)
3354                         return NULL;
3355
3356                 bacpy(&irk->bdaddr, bdaddr);
3357                 irk->addr_type = addr_type;
3358
3359                 list_add(&irk->list, &hdev->identity_resolving_keys);
3360         }
3361
3362         memcpy(irk->val, val, 16);
3363         bacpy(&irk->rpa, rpa);
3364
3365         return irk;
3366 }
3367
3368 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3369 {
3370         struct link_key *key;
3371
3372         key = hci_find_link_key(hdev, bdaddr);
3373         if (!key)
3374                 return -ENOENT;
3375
3376         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3377
3378         list_del(&key->list);
3379         kfree(key);
3380
3381         return 0;
3382 }
3383
3384 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3385 {
3386         struct smp_ltk *k, *tmp;
3387         int removed = 0;
3388
3389         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3390                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3391                         continue;
3392
3393                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3394
3395                 list_del(&k->list);
3396                 kfree(k);
3397                 removed++;
3398         }
3399
3400         return removed ? 0 : -ENOENT;
3401 }
3402
3403 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3404 {
3405         struct smp_irk *k, *tmp;
3406
3407         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3408                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3409                         continue;
3410
3411                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3412
3413                 list_del(&k->list);
3414                 kfree(k);
3415         }
3416 }
3417
3418 /* HCI command timer function */
3419 static void hci_cmd_timeout(struct work_struct *work)
3420 {
3421         struct hci_dev *hdev = container_of(work, struct hci_dev,
3422                                             cmd_timer.work);
3423
3424         if (hdev->sent_cmd) {
3425                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3426                 u16 opcode = __le16_to_cpu(sent->opcode);
3427
3428                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3429         } else {
3430                 BT_ERR("%s command tx timeout", hdev->name);
3431         }
3432
3433         atomic_set(&hdev->cmd_cnt, 1);
3434         queue_work(hdev->workqueue, &hdev->cmd_work);
3435 }
3436
3437 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3438                                           bdaddr_t *bdaddr)
3439 {
3440         struct oob_data *data;
3441
3442         list_for_each_entry(data, &hdev->remote_oob_data, list)
3443                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3444                         return data;
3445
3446         return NULL;
3447 }
3448
3449 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3450 {
3451         struct oob_data *data;
3452
3453         data = hci_find_remote_oob_data(hdev, bdaddr);
3454         if (!data)
3455                 return -ENOENT;
3456
3457         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3458
3459         list_del(&data->list);
3460         kfree(data);
3461
3462         return 0;
3463 }
3464
3465 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3466 {
3467         struct oob_data *data, *n;
3468
3469         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3470                 list_del(&data->list);
3471                 kfree(data);
3472         }
3473 }
3474
3475 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3476                             u8 *hash, u8 *randomizer)
3477 {
3478         struct oob_data *data;
3479
3480         data = hci_find_remote_oob_data(hdev, bdaddr);
3481         if (!data) {
3482                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3483                 if (!data)
3484                         return -ENOMEM;
3485
3486                 bacpy(&data->bdaddr, bdaddr);
3487                 list_add(&data->list, &hdev->remote_oob_data);
3488         }
3489
3490         memcpy(data->hash192, hash, sizeof(data->hash192));
3491         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3492
3493         memset(data->hash256, 0, sizeof(data->hash256));
3494         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3495
3496         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3497
3498         return 0;
3499 }
3500
3501 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3502                                 u8 *hash192, u8 *randomizer192,
3503                                 u8 *hash256, u8 *randomizer256)
3504 {
3505         struct oob_data *data;
3506
3507         data = hci_find_remote_oob_data(hdev, bdaddr);
3508         if (!data) {
3509                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3510                 if (!data)
3511                         return -ENOMEM;
3512
3513                 bacpy(&data->bdaddr, bdaddr);
3514                 list_add(&data->list, &hdev->remote_oob_data);
3515         }
3516
3517         memcpy(data->hash192, hash192, sizeof(data->hash192));
3518         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3519
3520         memcpy(data->hash256, hash256, sizeof(data->hash256));
3521         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3522
3523         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3524
3525         return 0;
3526 }
3527
3528 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3529                                          bdaddr_t *bdaddr, u8 type)
3530 {
3531         struct bdaddr_list *b;
3532
3533         list_for_each_entry(b, bdaddr_list, list) {
3534                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3535                         return b;
3536         }
3537
3538         return NULL;
3539 }
3540
3541 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3542 {
3543         struct list_head *p, *n;
3544
3545         list_for_each_safe(p, n, bdaddr_list) {
3546                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3547
3548                 list_del(p);
3549                 kfree(b);
3550         }
3551 }
3552
3553 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3554 {
3555         struct bdaddr_list *entry;
3556
3557         if (!bacmp(bdaddr, BDADDR_ANY))
3558                 return -EBADF;
3559
3560         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3561                 return -EEXIST;
3562
3563         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3564         if (!entry)
3565                 return -ENOMEM;
3566
3567         bacpy(&entry->bdaddr, bdaddr);
3568         entry->bdaddr_type = type;
3569
3570         list_add(&entry->list, list);
3571
3572         return 0;
3573 }
3574
3575 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3576 {
3577         struct bdaddr_list *entry;
3578
3579         if (!bacmp(bdaddr, BDADDR_ANY)) {
3580                 hci_bdaddr_list_clear(list);
3581                 return 0;
3582         }
3583
3584         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3585         if (!entry)
3586                 return -ENOENT;
3587
3588         list_del(&entry->list);
3589         kfree(entry);
3590
3591         return 0;
3592 }
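
/*
 * Usage sketch (illustrative): these helpers implement a simple list
 * keyed by address and address type, e.g. for the LE white list:
 *
 *	err = hci_bdaddr_list_add(&hdev->le_white_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	// -EEXIST if already present, -EBADF for BDADDR_ANY
 *
 *	err = hci_bdaddr_list_del(&hdev->le_white_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *
 * Passing BDADDR_ANY to hci_bdaddr_list_del() clears the whole list.
 */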
3593
3594 /* This function requires the caller holds hdev->lock */
3595 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3596                                                bdaddr_t *addr, u8 addr_type)
3597 {
3598         struct hci_conn_params *params;
3599
3600         /* The conn params list only contains identity addresses */
3601         if (!hci_is_identity_address(addr, addr_type))
3602                 return NULL;
3603
3604         list_for_each_entry(params, &hdev->le_conn_params, list) {
3605                 if (bacmp(&params->addr, addr) == 0 &&
3606                     params->addr_type == addr_type) {
3607                         return params;
3608                 }
3609         }
3610
3611         return NULL;
3612 }
3613
3614 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3615 {
3616         struct hci_conn *conn;
3617
3618         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3619         if (!conn)
3620                 return false;
3621
3622         if (conn->dst_type != type)
3623                 return false;
3624
3625         if (conn->state != BT_CONNECTED)
3626                 return false;
3627
3628         return true;
3629 }
3630
3631 /* This function requires the caller holds hdev->lock */
3632 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3633                                                   bdaddr_t *addr, u8 addr_type)
3634 {
3635         struct hci_conn_params *param;
3636
3637         /* The list only contains identity addresses */
3638         if (!hci_is_identity_address(addr, addr_type))
3639                 return NULL;
3640
3641         list_for_each_entry(param, list, action) {
3642                 if (bacmp(&param->addr, addr) == 0 &&
3643                     param->addr_type == addr_type)
3644                         return param;
3645         }
3646
3647         return NULL;
3648 }
3649
3650 /* This function requires the caller holds hdev->lock */
3651 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3652                                             bdaddr_t *addr, u8 addr_type)
3653 {
3654         struct hci_conn_params *params;
3655
3656         if (!hci_is_identity_address(addr, addr_type))
3657                 return NULL;
3658
3659         params = hci_conn_params_lookup(hdev, addr, addr_type);
3660         if (params)
3661                 return params;
3662
3663         params = kzalloc(sizeof(*params), GFP_KERNEL);
3664         if (!params) {
3665                 BT_ERR("Out of memory");
3666                 return NULL;
3667         }
3668
3669         bacpy(&params->addr, addr);
3670         params->addr_type = addr_type;
3671
3672         list_add(&params->list, &hdev->le_conn_params);
3673         INIT_LIST_HEAD(&params->action);
3674
3675         params->conn_min_interval = hdev->le_conn_min_interval;
3676         params->conn_max_interval = hdev->le_conn_max_interval;
3677         params->conn_latency = hdev->le_conn_latency;
3678         params->supervision_timeout = hdev->le_supv_timeout;
3679         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3680
3681         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3682
3683         return params;
3684 }
3685
3686 /* This function requires the caller holds hdev->lock */
3687 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3688                         u8 auto_connect)
3689 {
3690         struct hci_conn_params *params;
3691
3692         params = hci_conn_params_add(hdev, addr, addr_type);
3693         if (!params)
3694                 return -EIO;
3695
3696         if (params->auto_connect == auto_connect)
3697                 return 0;
3698
3699         list_del_init(&params->action);
3700
3701         switch (auto_connect) {
3702         case HCI_AUTO_CONN_DISABLED:
3703         case HCI_AUTO_CONN_LINK_LOSS:
3704                 hci_update_background_scan(hdev);
3705                 break;
3706         case HCI_AUTO_CONN_REPORT:
3707                 list_add(&params->action, &hdev->pend_le_reports);
3708                 hci_update_background_scan(hdev);
3709                 break;
3710         case HCI_AUTO_CONN_DIRECT:
3711         case HCI_AUTO_CONN_ALWAYS:
3712                 if (!is_connected(hdev, addr, addr_type)) {
3713                         list_add(&params->action, &hdev->pend_le_conns);
3714                         hci_update_background_scan(hdev);
3715                 }
3716                 break;
3717         }
3718
3719         params->auto_connect = auto_connect;
3720
3721         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3722                auto_connect);
3723
3724         return 0;
3725 }
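
/*
 * Usage sketch (illustrative): arming automatic reconnection for a
 * bonded LE device; the caller must hold hdev->lock:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * Unless the device is already connected, this adds it to
 * hdev->pend_le_conns and kicks the background scan.
 */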
3726
3727 /* This function requires the caller holds hdev->lock */
3728 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3729 {
3730         struct hci_conn_params *params;
3731
3732         params = hci_conn_params_lookup(hdev, addr, addr_type);
3733         if (!params)
3734                 return;
3735
3736         list_del(&params->action);
3737         list_del(&params->list);
3738         kfree(params);
3739
3740         hci_update_background_scan(hdev);
3741
3742         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3743 }
3744
3745 /* This function requires the caller holds hdev->lock */
3746 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3747 {
3748         struct hci_conn_params *params, *tmp;
3749
3750         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3751                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3752                         continue;
3753                 list_del(&params->list);
3754                 kfree(params);
3755         }
3756
3757         BT_DBG("All LE disabled connection parameters were removed");
3758 }
3759
3760 /* This function requires the caller holds hdev->lock */
3761 void hci_conn_params_clear_all(struct hci_dev *hdev)
3762 {
3763         struct hci_conn_params *params, *tmp;
3764
3765         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3766                 list_del(&params->action);
3767                 list_del(&params->list);
3768                 kfree(params);
3769         }
3770
3771         hci_update_background_scan(hdev);
3772
3773         BT_DBG("All LE connection parameters were removed");
3774 }
3775
3776 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3777 {
3778         if (status) {
3779                 BT_ERR("Failed to start inquiry: status %d", status);
3780
3781                 hci_dev_lock(hdev);
3782                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3783                 hci_dev_unlock(hdev);
3784                 return;
3785         }
3786 }
3787
3788 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3789 {
3790         /* General inquiry access code (GIAC) */
3791         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3792         struct hci_request req;
3793         struct hci_cp_inquiry cp;
3794         int err;
3795
3796         if (status) {
3797                 BT_ERR("Failed to disable LE scanning: status %d", status);
3798                 return;
3799         }
3800
3801         switch (hdev->discovery.type) {
3802         case DISCOV_TYPE_LE:
3803                 hci_dev_lock(hdev);
3804                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3805                 hci_dev_unlock(hdev);
3806                 break;
3807
3808         case DISCOV_TYPE_INTERLEAVED:
3809                 hci_req_init(&req, hdev);
3810
3811                 memset(&cp, 0, sizeof(cp));
3812                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3813                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3814                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3815
3816                 hci_dev_lock(hdev);
3817
3818                 hci_inquiry_cache_flush(hdev);
3819
3820                 err = hci_req_run(&req, inquiry_complete);
3821                 if (err) {
3822                         BT_ERR("Inquiry request failed: err %d", err);
3823                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3824                 }
3825
3826                 hci_dev_unlock(hdev);
3827                 break;
3828         }
3829 }
3830
3831 static void le_scan_disable_work(struct work_struct *work)
3832 {
3833         struct hci_dev *hdev = container_of(work, struct hci_dev,
3834                                             le_scan_disable.work);
3835         struct hci_request req;
3836         int err;
3837
3838         BT_DBG("%s", hdev->name);
3839
3840         hci_req_init(&req, hdev);
3841
3842         hci_req_add_le_scan_disable(&req);
3843
3844         err = hci_req_run(&req, le_scan_disable_work_complete);
3845         if (err)
3846                 BT_ERR("Disable LE scanning request failed: err %d", err);
3847 }
3848
3849 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3850 {
3851         struct hci_dev *hdev = req->hdev;
3852
3853         /* If we're advertising or initiating an LE connection we can't
3854          * go ahead and change the random address at this time. This is
3855          * because the eventual initiator address used for the
3856          * subsequently created connection will be undefined (some
3857          * controllers use the new address and others the one we had
3858          * when the operation started).
3859          *
3860          * In this kind of scenario skip the update and let the random
3861          * address be updated at the next cycle.
3862          */
3863         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3864             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3865                 BT_DBG("Deferring random address update");
3866                 return;
3867         }
3868
3869         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3870 }
3871
3872 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3873                               u8 *own_addr_type)
3874 {
3875         struct hci_dev *hdev = req->hdev;
3876         int err;
3877
3878         /* If privacy is enabled, use a resolvable private address. If
3879          * the current RPA has expired or something other than the
3880          * current RPA is in use, then generate a new one.
3881          */
3882         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3883                 int to;
3884
3885                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3886
3887                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3888                     !bacmp(&hdev->random_addr, &hdev->rpa))
3889                         return 0;
3890
3891                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3892                 if (err < 0) {
3893                         BT_ERR("%s failed to generate new RPA", hdev->name);
3894                         return err;
3895                 }
3896
3897                 set_random_addr(req, &hdev->rpa);
3898
3899                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3900                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3901
3902                 return 0;
3903         }
3904
3905         /* In case of required privacy without a resolvable private address,
3906          * use an unresolvable private address. This is useful for active
3907          * scanning and non-connectable advertising.
3908          */
3909         if (require_privacy) {
3910                 bdaddr_t urpa;
3911
3912                 get_random_bytes(&urpa, 6);
3913                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3914
3915                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3916                 set_random_addr(req, &urpa);
3917                 return 0;
3918         }
3919
3920         /* If forcing static address is in use or there is no public
3921          * address, use the static address as the random address (but
3922          * skip the HCI command if the current random address is
3923          * already the static one).
3924          */
3925         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3926             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3927                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3928                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3929                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3930                                     &hdev->static_addr);
3931                 return 0;
3932         }
3933
3934         /* Neither privacy nor static address is being used so use a
3935          * public address.
3936          */
3937         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3938
3939         return 0;
3940 }
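
/*
 * Usage sketch (illustrative): request builders call this before
 * enabling scanning or advertising so that the own_address_type
 * parameter matches the address programmed into the controller:
 *
 *	struct hci_cp_le_set_scan_param cp;
 *	u8 own_addr_type;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	if (hci_update_random_address(req, true, &own_addr_type) < 0)
 *		return;
 *	cp.own_address_type = own_addr_type;
 *	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
 */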
3941
3942 /* Copy the Identity Address of the controller.
3943  *
3944  * If the controller has a public BD_ADDR, then by default use that one.
3945  * If this is an LE-only controller without a public address, default to
3946  * the static random address.
3947  *
3948  * For debugging purposes it is possible to force controllers with a
3949  * public address to use the static random address instead.
3950  */
3951 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3952                                u8 *bdaddr_type)
3953 {
3954         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3955             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3956                 bacpy(bdaddr, &hdev->static_addr);
3957                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3958         } else {
3959                 bacpy(bdaddr, &hdev->bdaddr);
3960                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3961         }
3962 }
3963
3964 /* Alloc HCI device */
3965 struct hci_dev *hci_alloc_dev(void)
3966 {
3967         struct hci_dev *hdev;
3968
3969         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3970         if (!hdev)
3971                 return NULL;
3972
3973         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3974         hdev->esco_type = (ESCO_HV1);
3975         hdev->link_mode = (HCI_LM_ACCEPT);
3976         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3977         hdev->io_capability = 0x03;     /* No Input No Output */
3978         hdev->manufacturer = 0xffff;    /* Default to internal use */
3979         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3980         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3981
3982         hdev->sniff_max_interval = 800;
3983         hdev->sniff_min_interval = 80;
3984
3985         hdev->le_adv_channel_map = 0x07;
3986         hdev->le_adv_min_interval = 0x0800;
3987         hdev->le_adv_max_interval = 0x0800;
3988         hdev->le_scan_interval = 0x0060;
3989         hdev->le_scan_window = 0x0030;
3990         hdev->le_conn_min_interval = 0x0028;
3991         hdev->le_conn_max_interval = 0x0038;
3992         hdev->le_conn_latency = 0x0000;
3993         hdev->le_supv_timeout = 0x002a;
3994
3995         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3996         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3997         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3998         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3999
4000         mutex_init(&hdev->lock);
4001         mutex_init(&hdev->req_lock);
4002
4003         INIT_LIST_HEAD(&hdev->mgmt_pending);
4004         INIT_LIST_HEAD(&hdev->blacklist);
4005         INIT_LIST_HEAD(&hdev->whitelist);
4006         INIT_LIST_HEAD(&hdev->uuids);
4007         INIT_LIST_HEAD(&hdev->link_keys);
4008         INIT_LIST_HEAD(&hdev->long_term_keys);
4009         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4010         INIT_LIST_HEAD(&hdev->remote_oob_data);
4011         INIT_LIST_HEAD(&hdev->le_white_list);
4012         INIT_LIST_HEAD(&hdev->le_conn_params);
4013         INIT_LIST_HEAD(&hdev->pend_le_conns);
4014         INIT_LIST_HEAD(&hdev->pend_le_reports);
4015         INIT_LIST_HEAD(&hdev->conn_hash.list);
4016
4017         INIT_WORK(&hdev->rx_work, hci_rx_work);
4018         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4019         INIT_WORK(&hdev->tx_work, hci_tx_work);
4020         INIT_WORK(&hdev->power_on, hci_power_on);
4021
4022         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4023         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4024         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4025
4026         skb_queue_head_init(&hdev->rx_q);
4027         skb_queue_head_init(&hdev->cmd_q);
4028         skb_queue_head_init(&hdev->raw_q);
4029
4030         init_waitqueue_head(&hdev->req_wait_q);
4031
4032         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4033
4034         hci_init_sysfs(hdev);
4035         discovery_init(hdev);
4036
4037         return hdev;
4038 }
4039 EXPORT_SYMBOL(hci_alloc_dev);
4040
4041 /* Free HCI device */
4042 void hci_free_dev(struct hci_dev *hdev)
4043 {
4044         /* will free via device release */
4045         put_device(&hdev->dev);
4046 }
4047 EXPORT_SYMBOL(hci_free_dev);
4048
4049 /* Register HCI device */
4050 int hci_register_dev(struct hci_dev *hdev)
4051 {
4052         int id, error;
4053
4054         if (!hdev->open || !hdev->close || !hdev->send)
4055                 return -EINVAL;
4056
4057         /* Do not allow HCI_AMP devices to register at index 0,
4058          * so the index can be used as the AMP controller ID.
4059          */
4060         switch (hdev->dev_type) {
4061         case HCI_BREDR:
4062                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4063                 break;
4064         case HCI_AMP:
4065                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4066                 break;
4067         default:
4068                 return -EINVAL;
4069         }
4070
4071         if (id < 0)
4072                 return id;
4073
4074         sprintf(hdev->name, "hci%d", id);
4075         hdev->id = id;
4076
4077         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4078
4079         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4080                                           WQ_MEM_RECLAIM, 1, hdev->name);
4081         if (!hdev->workqueue) {
4082                 error = -ENOMEM;
4083                 goto err;
4084         }
4085
4086         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4087                                               WQ_MEM_RECLAIM, 1, hdev->name);
4088         if (!hdev->req_workqueue) {
4089                 destroy_workqueue(hdev->workqueue);
4090                 error = -ENOMEM;
4091                 goto err;
4092         }
4093
4094         if (!IS_ERR_OR_NULL(bt_debugfs))
4095                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4096
4097         dev_set_name(&hdev->dev, "%s", hdev->name);
4098
4099         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4100                                                CRYPTO_ALG_ASYNC);
4101         if (IS_ERR(hdev->tfm_aes)) {
4102                 BT_ERR("Unable to create crypto context");
4103                 error = PTR_ERR(hdev->tfm_aes);
4104                 hdev->tfm_aes = NULL;
4105                 goto err_wqueue;
4106         }
4107
4108         error = device_add(&hdev->dev);
4109         if (error < 0)
4110                 goto err_tfm;
4111
4112         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4113                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4114                                     hdev);
4115         if (hdev->rfkill) {
4116                 if (rfkill_register(hdev->rfkill) < 0) {
4117                         rfkill_destroy(hdev->rfkill);
4118                         hdev->rfkill = NULL;
4119                 }
4120         }
4121
4122         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4123                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4124
4125         set_bit(HCI_SETUP, &hdev->dev_flags);
4126         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4127
4128         if (hdev->dev_type == HCI_BREDR) {
4129                 /* Assume BR/EDR support until proven otherwise (such as
4130                  * through reading supported features during init).
4131                  */
4132                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4133         }
4134
4135         write_lock(&hci_dev_list_lock);
4136         list_add(&hdev->list, &hci_dev_list);
4137         write_unlock(&hci_dev_list_lock);
4138
4139         /* Devices that are marked for raw-only usage are unconfigured
4140          * and should not be included in normal operation.
4141          */
4142         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4143                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4144
4145         hci_notify(hdev, HCI_DEV_REG);
4146         hci_dev_hold(hdev);
4147
4148         queue_work(hdev->req_workqueue, &hdev->power_on);
4149
4150         return id;
4151
4152 err_tfm:
4153         crypto_free_blkcipher(hdev->tfm_aes);
4154 err_wqueue:
4155         destroy_workqueue(hdev->workqueue);
4156         destroy_workqueue(hdev->req_workqueue);
4157 err:
4158         ida_simple_remove(&hci_index_ida, hdev->id);
4159
4160         return error;
4161 }
4162 EXPORT_SYMBOL(hci_register_dev);
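
/*
 * Usage sketch (illustrative): a transport driver brings up a new
 * controller roughly like this; my_open, my_close and my_send are
 * hypothetical driver callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */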
4163
4164 /* Unregister HCI device */
4165 void hci_unregister_dev(struct hci_dev *hdev)
4166 {
4167         int i, id;
4168
4169         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4170
4171         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4172
4173         id = hdev->id;
4174
4175         write_lock(&hci_dev_list_lock);
4176         list_del(&hdev->list);
4177         write_unlock(&hci_dev_list_lock);
4178
4179         hci_dev_do_close(hdev);
4180
4181         for (i = 0; i < NUM_REASSEMBLY; i++)
4182                 kfree_skb(hdev->reassembly[i]);
4183
4184         cancel_work_sync(&hdev->power_on);
4185
4186         if (!test_bit(HCI_INIT, &hdev->flags) &&
4187             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4188             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4189                 hci_dev_lock(hdev);
4190                 mgmt_index_removed(hdev);
4191                 hci_dev_unlock(hdev);
4192         }
4193
4194         /* mgmt_index_removed should take care of emptying the
4195          * pending list */
4196         BUG_ON(!list_empty(&hdev->mgmt_pending));
4197
4198         hci_notify(hdev, HCI_DEV_UNREG);
4199
4200         if (hdev->rfkill) {
4201                 rfkill_unregister(hdev->rfkill);
4202                 rfkill_destroy(hdev->rfkill);
4203         }
4204
4205         if (hdev->tfm_aes)
4206                 crypto_free_blkcipher(hdev->tfm_aes);
4207
4208         device_del(&hdev->dev);
4209
4210         debugfs_remove_recursive(hdev->debugfs);
4211
4212         destroy_workqueue(hdev->workqueue);
4213         destroy_workqueue(hdev->req_workqueue);
4214
4215         hci_dev_lock(hdev);
4216         hci_bdaddr_list_clear(&hdev->blacklist);
4217         hci_bdaddr_list_clear(&hdev->whitelist);
4218         hci_uuids_clear(hdev);
4219         hci_link_keys_clear(hdev);
4220         hci_smp_ltks_clear(hdev);
4221         hci_smp_irks_clear(hdev);
4222         hci_remote_oob_data_clear(hdev);
4223         hci_bdaddr_list_clear(&hdev->le_white_list);
4224         hci_conn_params_clear_all(hdev);
4225         hci_dev_unlock(hdev);
4226
4227         hci_dev_put(hdev);
4228
4229         ida_simple_remove(&hci_index_ida, id);
4230 }
4231 EXPORT_SYMBOL(hci_unregister_dev);
4232
4233 /* Suspend HCI device */
4234 int hci_suspend_dev(struct hci_dev *hdev)
4235 {
4236         hci_notify(hdev, HCI_DEV_SUSPEND);
4237         return 0;
4238 }
4239 EXPORT_SYMBOL(hci_suspend_dev);
4240
4241 /* Resume HCI device */
4242 int hci_resume_dev(struct hci_dev *hdev)
4243 {
4244         hci_notify(hdev, HCI_DEV_RESUME);
4245         return 0;
4246 }
4247 EXPORT_SYMBOL(hci_resume_dev);
4248
4249 /* Receive frame from HCI drivers */
4250 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4251 {
4252         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4253                       && !test_bit(HCI_INIT, &hdev->flags))) {
4254                 kfree_skb(skb);
4255                 return -ENXIO;
4256         }
4257
4258         /* Incoming skb */
4259         bt_cb(skb)->incoming = 1;
4260
4261         /* Time stamp */
4262         __net_timestamp(skb);
4263
4264         skb_queue_tail(&hdev->rx_q, skb);
4265         queue_work(hdev->workqueue, &hdev->rx_work);
4266
4267         return 0;
4268 }
4269 EXPORT_SYMBOL(hci_recv_frame);
4270
4271 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4272                           int count, __u8 index)
4273 {
4274         int len = 0;
4275         int hlen = 0;
4276         int remain = count;
4277         struct sk_buff *skb;
4278         struct bt_skb_cb *scb;
4279
4280         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4281             index >= NUM_REASSEMBLY)
4282                 return -EILSEQ;
4283
4284         skb = hdev->reassembly[index];
4285
4286         if (!skb) {
4287                 switch (type) {
4288                 case HCI_ACLDATA_PKT:
4289                         len = HCI_MAX_FRAME_SIZE;
4290                         hlen = HCI_ACL_HDR_SIZE;
4291                         break;
4292                 case HCI_EVENT_PKT:
4293                         len = HCI_MAX_EVENT_SIZE;
4294                         hlen = HCI_EVENT_HDR_SIZE;
4295                         break;
4296                 case HCI_SCODATA_PKT:
4297                         len = HCI_MAX_SCO_SIZE;
4298                         hlen = HCI_SCO_HDR_SIZE;
4299                         break;
4300                 }
4301
4302                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4303                 if (!skb)
4304                         return -ENOMEM;
4305
4306                 scb = (void *) skb->cb;
4307                 scb->expect = hlen;
4308                 scb->pkt_type = type;
4309
4310                 hdev->reassembly[index] = skb;
4311         }
4312
4313         while (count) {
4314                 scb = (void *) skb->cb;
4315                 len = min_t(uint, scb->expect, count);
4316
4317                 memcpy(skb_put(skb, len), data, len);
4318
4319                 count -= len;
4320                 data += len;
4321                 scb->expect -= len;
4322                 remain = count;
4323
4324                 switch (type) {
4325                 case HCI_EVENT_PKT:
4326                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4327                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4328                                 scb->expect = h->plen;
4329
4330                                 if (skb_tailroom(skb) < scb->expect) {
4331                                         kfree_skb(skb);
4332                                         hdev->reassembly[index] = NULL;
4333                                         return -ENOMEM;
4334                                 }
4335                         }
4336                         break;
4337
4338                 case HCI_ACLDATA_PKT:
4339                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4340                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4341                                 scb->expect = __le16_to_cpu(h->dlen);
4342
4343                                 if (skb_tailroom(skb) < scb->expect) {
4344                                         kfree_skb(skb);
4345                                         hdev->reassembly[index] = NULL;
4346                                         return -ENOMEM;
4347                                 }
4348                         }
4349                         break;
4350
4351                 case HCI_SCODATA_PKT:
4352                         if (skb->len == HCI_SCO_HDR_SIZE) {
4353                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4354                                 scb->expect = h->dlen;
4355
4356                                 if (skb_tailroom(skb) < scb->expect) {
4357                                         kfree_skb(skb);
4358                                         hdev->reassembly[index] = NULL;
4359                                         return -ENOMEM;
4360                                 }
4361                         }
4362                         break;
4363                 }
4364
4365                 if (scb->expect == 0) {
4366                         /* Complete frame */
4367
4368                         bt_cb(skb)->pkt_type = type;
4369                         hci_recv_frame(hdev, skb);
4370
4371                         hdev->reassembly[index] = NULL;
4372                         return remain;
4373                 }
4374         }
4375
4376         return remain;
4377 }
4378
4379 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4380 {
4381         int rem = 0;
4382
4383         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4384                 return -EILSEQ;
4385
4386         while (count) {
4387                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4388                 if (rem < 0)
4389                         return rem;
4390
4391                 data += (count - rem);
4392                 count = rem;
4393         }
4394
4395         return rem;
4396 }
4397 EXPORT_SYMBOL(hci_recv_fragment);
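
/*
 * Usage sketch (illustrative): a UART-style driver that receives typed
 * packets in arbitrary chunks can feed each chunk here and let the
 * core reassemble complete frames; buf and len are hypothetical names
 * for the driver's receive buffer:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		// framing error or out of memory
 */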
4398
4399 #define STREAM_REASSEMBLY 0
4400
4401 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4402 {
4403         int type;
4404         int rem = 0;
4405
4406         while (count) {
4407                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4408
4409                 if (!skb) {
4410                         struct { char type; } *pkt;
4411
4412                         /* Start of the frame */
4413                         pkt = data;
4414                         type = pkt->type;
4415
4416                         data++;
4417                         count--;
4418                 } else
4419                         type = bt_cb(skb)->pkt_type;
4420
4421                 rem = hci_reassembly(hdev, type, data, count,
4422                                      STREAM_REASSEMBLY);
4423                 if (rem < 0)
4424                         return rem;
4425
4426                 data += (count - rem);
4427                 count = rem;
4428         }
4429
4430         return rem;
4431 }
4432 EXPORT_SYMBOL(hci_recv_stream_fragment);
4433
4434 /* ---- Interface to upper protocols ---- */
4435
4436 int hci_register_cb(struct hci_cb *cb)
4437 {
4438         BT_DBG("%p name %s", cb, cb->name);
4439
4440         write_lock(&hci_cb_list_lock);
4441         list_add(&cb->list, &hci_cb_list);
4442         write_unlock(&hci_cb_list_lock);
4443
4444         return 0;
4445 }
4446 EXPORT_SYMBOL(hci_register_cb);
4447
4448 int hci_unregister_cb(struct hci_cb *cb)
4449 {
4450         BT_DBG("%p name %s", cb, cb->name);
4451
4452         write_lock(&hci_cb_list_lock);
4453         list_del(&cb->list);
4454         write_unlock(&hci_cb_list_lock);
4455
4456         return 0;
4457 }
4458 EXPORT_SYMBOL(hci_unregister_cb);
4459
4460 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4461 {
4462         int err;
4463
4464         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4465
4466         /* Time stamp */
4467         __net_timestamp(skb);
4468
4469         /* Send copy to monitor */
4470         hci_send_to_monitor(hdev, skb);
4471
4472         if (atomic_read(&hdev->promisc)) {
4473                 /* Send copy to the sockets */
4474                 hci_send_to_sock(hdev, skb);
4475         }
4476
4477         /* Get rid of skb owner prior to sending to the driver. */
4478         skb_orphan(skb);
4479
4480         err = hdev->send(hdev, skb);
4481         if (err < 0) {
4482                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4483                 kfree_skb(skb);
4484         }
4485 }
4486
4487 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4488 {
4489         skb_queue_head_init(&req->cmd_q);
4490         req->hdev = hdev;
4491         req->err = 0;
4492 }
4493
4494 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4495 {
4496         struct hci_dev *hdev = req->hdev;
4497         struct sk_buff *skb;
4498         unsigned long flags;
4499
4500         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4501
4502         /* If an error occurred during request building, remove all HCI
4503          * commands queued on the HCI request queue.
4504          */
4505         if (req->err) {
4506                 skb_queue_purge(&req->cmd_q);
4507                 return req->err;
4508         }
4509
4510         /* Do not allow empty requests */
4511         if (skb_queue_empty(&req->cmd_q))
4512                 return -ENODATA;
4513
4514         skb = skb_peek_tail(&req->cmd_q);
4515         bt_cb(skb)->req.complete = complete;
4516
4517         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4518         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4519         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4520
4521         queue_work(hdev->workqueue, &hdev->cmd_work);
4522
4523         return 0;
4524 }
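
/*
 * Usage sketch (illustrative): commands are batched into a request and
 * run asynchronously; the complete callback fires after the last
 * command in the batch. my_complete is a hypothetical callback:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */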
4525
4526 bool hci_req_pending(struct hci_dev *hdev)
4527 {
4528         return (hdev->req_status == HCI_REQ_PEND);
4529 }
4530
4531 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4532                                        u32 plen, const void *param)
4533 {
4534         int len = HCI_COMMAND_HDR_SIZE + plen;
4535         struct hci_command_hdr *hdr;
4536         struct sk_buff *skb;
4537
4538         skb = bt_skb_alloc(len, GFP_ATOMIC);
4539         if (!skb)
4540                 return NULL;
4541
4542         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4543         hdr->opcode = cpu_to_le16(opcode);
4544         hdr->plen   = plen;
4545
4546         if (plen)
4547                 memcpy(skb_put(skb, plen), param, plen);
4548
4549         BT_DBG("skb len %d", skb->len);
4550
4551         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4552
4553         return skb;
4554 }
4555
4556 /* Send HCI command */
4557 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4558                  const void *param)
4559 {
4560         struct sk_buff *skb;
4561
4562         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4563
4564         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4565         if (!skb) {
4566                 BT_ERR("%s no memory for command", hdev->name);
4567                 return -ENOMEM;
4568         }
4569
4570         /* Stand-alone HCI commands must be flagged as
4571          * single-command requests.
4572          */
4573         bt_cb(skb)->req.start = true;
4574
4575         skb_queue_tail(&hdev->cmd_q, skb);
4576         queue_work(hdev->workqueue, &hdev->cmd_work);
4577
4578         return 0;
4579 }
4580
4581 /* Queue a command to an asynchronous HCI request */
4582 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4583                     const void *param, u8 event)
4584 {
4585         struct hci_dev *hdev = req->hdev;
4586         struct sk_buff *skb;
4587
4588         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4589
4590         /* If an error occurred during request building, there is no point in
4591          * queueing the HCI command. We can simply return.
4592          */
4593         if (req->err)
4594                 return;
4595
4596         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4597         if (!skb) {
4598                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4599                        hdev->name, opcode);
4600                 req->err = -ENOMEM;
4601                 return;
4602         }
4603
4604         if (skb_queue_empty(&req->cmd_q))
4605                 bt_cb(skb)->req.start = true;
4606
4607         bt_cb(skb)->req.event = event;
4608
4609         skb_queue_tail(&req->cmd_q, skb);
4610 }
4611
4612 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4613                  const void *param)
4614 {
4615         hci_req_add_ev(req, opcode, plen, param, 0);
4616 }
4617
4618 /* Get data from the previously sent command */
4619 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4620 {
4621         struct hci_command_hdr *hdr;
4622
4623         if (!hdev->sent_cmd)
4624                 return NULL;
4625
4626         hdr = (void *) hdev->sent_cmd->data;
4627
4628         if (hdr->opcode != cpu_to_le16(opcode))
4629                 return NULL;
4630
4631         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4632
4633         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4634 }
4635
4636 /* Send ACL data */
4637 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4638 {
4639         struct hci_acl_hdr *hdr;
4640         int len = skb->len;
4641
4642         skb_push(skb, HCI_ACL_HDR_SIZE);
4643         skb_reset_transport_header(skb);
4644         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4645         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4646         hdr->dlen   = cpu_to_le16(len);
4647 }
4648
4649 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4650                           struct sk_buff *skb, __u16 flags)
4651 {
4652         struct hci_conn *conn = chan->conn;
4653         struct hci_dev *hdev = conn->hdev;
4654         struct sk_buff *list;
4655
4656         skb->len = skb_headlen(skb);
4657         skb->data_len = 0;
4658
4659         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4660
4661         switch (hdev->dev_type) {
4662         case HCI_BREDR:
4663                 hci_add_acl_hdr(skb, conn->handle, flags);
4664                 break;
4665         case HCI_AMP:
4666                 hci_add_acl_hdr(skb, chan->handle, flags);
4667                 break;
4668         default:
4669                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4670                 return;
4671         }
4672
4673         list = skb_shinfo(skb)->frag_list;
4674         if (!list) {
4675                 /* Non-fragmented */
4676                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4677
4678                 skb_queue_tail(queue, skb);
4679         } else {
4680                 /* Fragmented */
4681                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4682
4683                 skb_shinfo(skb)->frag_list = NULL;
4684
4685                 /* Queue all fragments atomically */
4686                 spin_lock(&queue->lock);
4687
4688                 __skb_queue_tail(queue, skb);
4689
4690                 flags &= ~ACL_START;
4691                 flags |= ACL_CONT;
4692                 do {
4693                         skb = list; list = list->next;
4694
4695                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4696                         hci_add_acl_hdr(skb, conn->handle, flags);
4697
4698                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4699
4700                         __skb_queue_tail(queue, skb);
4701                 } while (list);
4702
4703                 spin_unlock(&queue->lock);
4704         }
4705 }
4706
4707 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4708 {
4709         struct hci_dev *hdev = chan->conn->hdev;
4710
4711         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4712
4713         hci_queue_acl(chan, &chan->data_q, skb, flags);
4714
4715         queue_work(hdev->workqueue, &hdev->tx_work);
4716 }
4717
4718 /* Send SCO data */
4719 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4720 {
4721         struct hci_dev *hdev = conn->hdev;
4722         struct hci_sco_hdr hdr;
4723
4724         BT_DBG("%s len %d", hdev->name, skb->len);
4725
4726         hdr.handle = cpu_to_le16(conn->handle);
4727         hdr.dlen   = skb->len;
4728
4729         skb_push(skb, HCI_SCO_HDR_SIZE);
4730         skb_reset_transport_header(skb);
4731         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4732
4733         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4734
4735         skb_queue_tail(&conn->data_q, skb);
4736         queue_work(hdev->workqueue, &hdev->tx_work);
4737 }
4738
4739 /* ---- HCI TX task (outgoing data) ---- */
4740
4741 /* HCI Connection scheduler */
4742 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4743                                      int *quote)
4744 {
4745         struct hci_conn_hash *h = &hdev->conn_hash;
4746         struct hci_conn *conn = NULL, *c;
4747         unsigned int num = 0, min = ~0;
4748
4749         /* We don't have to lock the device here. Connections are
4750          * always added and removed with the TX task disabled. */
4751
4752         rcu_read_lock();
4753
4754         list_for_each_entry_rcu(c, &h->list, list) {
4755                 if (c->type != type || skb_queue_empty(&c->data_q))
4756                         continue;
4757
4758                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4759                         continue;
4760
4761                 num++;
4762
4763                 if (c->sent < min) {
4764                         min  = c->sent;
4765                         conn = c;
4766                 }
4767
4768                 if (hci_conn_num(hdev, type) == num)
4769                         break;
4770         }
4771
4772         rcu_read_unlock();
4773
4774         if (conn) {
4775                 int cnt, q;
4776
4777                 switch (conn->type) {
4778                 case ACL_LINK:
4779                         cnt = hdev->acl_cnt;
4780                         break;
4781                 case SCO_LINK:
4782                 case ESCO_LINK:
4783                         cnt = hdev->sco_cnt;
4784                         break;
4785                 case LE_LINK:
4786                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4787                         break;
4788                 default:
4789                         cnt = 0;
4790                         BT_ERR("Unknown link type");
4791                 }
4792
4793                 q = cnt / num;
4794                 *quote = q ? q : 1;
4795         } else
4796                 *quote = 0;
4797
4798         BT_DBG("conn %p quote %d", conn, *quote);
4799         return conn;
4800 }
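
/*
 * Worked example (illustrative): with three ACL connections holding
 * queued data and acl_cnt = 8 free controller buffers, the selected
 * (least-sent) connection gets a quote of 8 / 3 = 2 packets for this
 * round; a quote of 0 is rounded up to 1 so that the scheduler always
 * makes progress.
 */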
4801
4802 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4803 {
4804         struct hci_conn_hash *h = &hdev->conn_hash;
4805         struct hci_conn *c;
4806
4807         BT_ERR("%s link tx timeout", hdev->name);
4808
4809         rcu_read_lock();
4810
4811         /* Kill stalled connections */
4812         list_for_each_entry_rcu(c, &h->list, list) {
4813                 if (c->type == type && c->sent) {
4814                         BT_ERR("%s killing stalled connection %pMR",
4815                                hdev->name, &c->dst);
4816                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4817                 }
4818         }
4819
4820         rcu_read_unlock();
4821 }
4822
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

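/* Anti-starvation pass: channels that transmitted in the last round get
 * their sent counter cleared, while channels that were skipped have their
 * queued packets promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */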
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

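/* Block accounting for block-based flow control. Illustrative numbers:
 * with hdev->block_len of 64 bytes, a 339 byte ACL frame (4 byte header
 * plus 335 bytes of payload) occupies DIV_ROUND_UP(335, 64) = 6 blocks.
 */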
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

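/* Packet-based ACL scheduling: drain each eligible channel up to its
 * quote, stop early if a lower-priority packet surfaces at the head of
 * the queue, and recalculate priorities whenever anything was sent.
 */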
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

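/* Block-based ACL scheduling: same idea as the packet-based variant, but
 * accounting is done in controller data blocks rather than packets, and
 * AMP controllers schedule AMP_LINK instead of ACL_LINK.
 */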
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL connections on a BR/EDR controller, nothing to do */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP connections on an AMP controller, nothing to do */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

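/* LE scheduling: controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credits, so the consumed count is written
 * back to whichever pool was actually drawn from.
 */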
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                /* Schedule queues and send pending frames to the HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        BT_ERR("%s ACL packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        }

        BT_ERR("%s SCO packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

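/* Commands belonging to one request are queued back to back and only the
 * first carries req.start, so the request is complete once the head of
 * cmd_q starts a new request (or the queue has drained entirely).
 */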
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to handle it specially.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

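/* Command scheduler: cmd_cnt tracks how many commands the controller will
 * currently accept. A clone of each command is kept in hdev->sent_cmd so
 * that the matching Command Complete/Status event can locate the request
 * callback, and cmd_timer catches controllers that never respond.
 */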
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

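/* Queue an LE scan disable command onto a request. Illustrative usage
 * (a sketch; complete_cb stands for whatever hci_req_complete_t callback
 * the caller wants invoked when the request finishes):
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add_le_scan_disable(&req);
 *      hci_req_run(&req, complete_cb);
 */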
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

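/* Returns the filter policy to use: 0x00 (accept all advertising) when
 * the white list cannot represent the wanted devices (not enough entries,
 * or a device's identity resolves to an RPA), 0x01 (white list only)
 * otherwise.
 */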
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Now that all stale white list entries have been removed,
         * walk through the list of pending connections and ensure
         * that any new device gets programmed into the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return the filter policy value that
         * bypasses the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
                       status);
}

/* This function controls background scanning based on the
 * hdev->pend_le_conns and hdev->pend_le_reports lists. If there are
 * pending LE connections or devices to report, we start background
 * scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags) ||
            test_bit(HCI_CONFIG, &hdev->dev_flags) ||
            test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
            test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        hci_req_init(&req, hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections and no devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If the controller is not scanning we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If the controller is connecting, we should not start
                 * scanning since some controllers are not able to scan
                 * and connect at the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If the controller is currently scanning, we stop it to
                 * ensure we don't miss any advertising (due to the
                 * duplicates filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}