Bluetooth: Add skeleton functions for debugfs creation
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

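/* Each handler below backs a per-controller debugfs file (typically found
 * under /sys/kernel/debug/bluetooth/hciX). Read-only values use the
 * seq_file single_open() helpers; writable boolean flags parse the input
 * with strtobool() and flip a bit in hdev->dbg_flags.
 */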
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
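
/* Example (sketch, not part of this excerpt's registration path): an
 * attribute like this would typically be wired up with
 *
 *	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
 *			    &dut_mode_fops);
 *
 * where hdev->debugfs is the controller's debugfs directory.
 */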

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct link_key *key;

        rcu_read_lock();
        list_for_each_entry_rcu(key, &hdev->link_keys, list)
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        rcu_read_unlock();

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");
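
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a numeric
 * attribute from a get callback, an optional set callback (NULL makes the
 * file read-only) and a printf format used when reading the value back.
 */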

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_lesc_support_fops = {
        .open           = simple_open,
        .read           = force_lesc_support_read,
        .write          = force_lesc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

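/* Sniff intervals are expressed in baseband slots (0.625 ms each); the
 * HCI specification requires even values, hence the val % 2 check below.
 */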
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

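/* LE connection intervals are in 1.25 ms units; 0x0006-0x0c80 is the
 * valid range from the specification (7.5 ms to 4 s).
 */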
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

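/* 0x01f3 (499) is the maximum slave latency allowed by the specification. */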
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

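/* Supervision timeout is in 10 ms units; 0x000a-0x0c80 covers the
 * specification range of 100 ms to 32 s.
 */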
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

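/* The advertising channel map is a bitmask of channels 37, 38 and 39,
 * so only values 0x01 through 0x07 are valid.
 */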
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

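/* Advertising intervals are in 0.625 ms units; 0x0020-0x4000 is the
 * specification range (20 ms to 10.24 s).
 */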
static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

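/* Pull the last received event out of hdev->recv_evt and verify that it
 * is the expected event (or, with event == 0, a Command Complete for the
 * given opcode). On any mismatch the skb is consumed and ERR_PTR(-ENODATA)
 * is returned.
 */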
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

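/* Send a single HCI command and sleep until the controller answers with
 * the requested event (Command Complete by default). Returns the response
 * skb on success or an ERR_PTR on failure; callers are expected to
 * serialize through hci_req_lock().
 */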
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
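
/* Example (sketch): a typical caller takes the request lock around the
 * synchronous command, mirroring dut_mode_write() above:
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */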

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

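/* Stage one of controller initialization: optionally reset the controller
 * and queue the basic identity reads, dispatched on the transport type
 * (BR/EDR primary controller vs. AMP controller).
 */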
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

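/* Pick the best supported inquiry mode: 0x02 = inquiry with extended
 * results, 0x01 = inquiry with RSSI, 0x00 = standard. The explicit
 * manufacturer/revision checks appear to cover controllers that handle
 * RSSI inquiry results without advertising the LMP feature bit.
 */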
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

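/* Build the main event mask from the controller's capabilities and send
 * it with HCI_OP_SET_EVENT_MASK. Bits for unsupported features are left
 * cleared so the controller never raises events the stack cannot handle.
 */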
1469 static void hci_setup_event_mask(struct hci_request *req)
1470 {
1471         struct hci_dev *hdev = req->hdev;
1472
1473         /* The second byte is 0xff instead of 0x9f (two reserved bits
1474          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1475          * command otherwise.
1476          */
1477         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1478
1479         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1480          * any event mask for pre 1.2 devices.
1481          */
1482         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1483                 return;
1484
1485         if (lmp_bredr_capable(hdev)) {
1486                 events[4] |= 0x01; /* Flow Specification Complete */
1487                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1488                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1489                 events[5] |= 0x08; /* Synchronous Connection Complete */
1490                 events[5] |= 0x10; /* Synchronous Connection Changed */
1491         } else {
1492                 /* Use a different default for LE-only devices */
1493                 memset(events, 0, sizeof(events));
1494                 events[0] |= 0x10; /* Disconnection Complete */
1495                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1496                 events[1] |= 0x20; /* Command Complete */
1497                 events[1] |= 0x40; /* Command Status */
1498                 events[1] |= 0x80; /* Hardware Error */
1499                 events[2] |= 0x04; /* Number of Completed Packets */
1500                 events[3] |= 0x02; /* Data Buffer Overflow */
1501
1502                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1503                         events[0] |= 0x80; /* Encryption Change */
1504                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
1505                 }
1506         }
1507
1508         if (lmp_inq_rssi_capable(hdev))
1509                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1510
1511         if (lmp_sniffsubr_capable(hdev))
1512                 events[5] |= 0x20; /* Sniff Subrating */
1513
1514         if (lmp_pause_enc_capable(hdev))
1515                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1516
1517         if (lmp_ext_inq_capable(hdev))
1518                 events[5] |= 0x40; /* Extended Inquiry Result */
1519
1520         if (lmp_no_flush_capable(hdev))
1521                 events[7] |= 0x01; /* Enhanced Flush Complete */
1522
1523         if (lmp_lsto_capable(hdev))
1524                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1525
1526         if (lmp_ssp_capable(hdev)) {
1527                 events[6] |= 0x01;      /* IO Capability Request */
1528                 events[6] |= 0x02;      /* IO Capability Response */
1529                 events[6] |= 0x04;      /* User Confirmation Request */
1530                 events[6] |= 0x08;      /* User Passkey Request */
1531                 events[6] |= 0x10;      /* Remote OOB Data Request */
1532                 events[6] |= 0x20;      /* Simple Pairing Complete */
1533                 events[7] |= 0x04;      /* User Passkey Notification */
1534                 events[7] |= 0x08;      /* Keypress Notification */
1535                 events[7] |= 0x10;      /* Remote Host Supported
1536                                          * Features Notification
1537                                          */
1538         }
1539
1540         if (lmp_le_capable(hdev))
1541                 events[7] |= 0x20;      /* LE Meta-Event */
1542
1543         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1544 }
1545
1546 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1547 {
1548         struct hci_dev *hdev = req->hdev;
1549
1550         if (lmp_bredr_capable(hdev))
1551                 bredr_setup(req);
1552         else
1553                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1554
1555         if (lmp_le_capable(hdev))
1556                 le_setup(req);
1557
1558         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1559          * local supported commands HCI command.
1560          */
1561         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1562                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1563
1564         if (lmp_ssp_capable(hdev)) {
1565                 /* When SSP is available, then the host features page
1566                  * should also be available as well. However some
1567                  * controllers list the max_page as 0 as long as SSP
1568                  * has not been enabled. To achieve proper debugging
1569                  * output, force the minimum max_page to 1 at least.
1570                  */
1571                 hdev->max_page = 0x01;
1572
1573                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1574                         u8 mode = 0x01;
1575                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1576                                     sizeof(mode), &mode);
1577                 } else {
1578                         struct hci_cp_write_eir cp;
1579
1580                         memset(hdev->eir, 0, sizeof(hdev->eir));
1581                         memset(&cp, 0, sizeof(cp));
1582
1583                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1584                 }
1585         }
1586
1587         if (lmp_inq_rssi_capable(hdev))
1588                 hci_setup_inquiry_mode(req);
1589
1590         if (lmp_inq_tx_pwr_capable(hdev))
1591                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1592
1593         if (lmp_ext_feat_capable(hdev)) {
1594                 struct hci_cp_read_local_ext_features cp;
1595
1596                 cp.page = 0x01;
1597                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1598                             sizeof(cp), &cp);
1599         }
1600
1601         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1602                 u8 enable = 1;
1603                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1604                             &enable);
1605         }
1606 }
1607
1608 static void hci_setup_link_policy(struct hci_request *req)
1609 {
1610         struct hci_dev *hdev = req->hdev;
1611         struct hci_cp_write_def_link_policy cp;
1612         u16 link_policy = 0;
1613
1614         if (lmp_rswitch_capable(hdev))
1615                 link_policy |= HCI_LP_RSWITCH;
1616         if (lmp_hold_capable(hdev))
1617                 link_policy |= HCI_LP_HOLD;
1618         if (lmp_sniff_capable(hdev))
1619                 link_policy |= HCI_LP_SNIFF;
1620         if (lmp_park_capable(hdev))
1621                 link_policy |= HCI_LP_PARK;
1622
1623         cp.policy = cpu_to_le16(link_policy);
1624         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1625 }
1626
1627 static void hci_set_le_support(struct hci_request *req)
1628 {
1629         struct hci_dev *hdev = req->hdev;
1630         struct hci_cp_write_le_host_supported cp;
1631
1632         /* LE-only devices do not support explicit enablement */
1633         if (!lmp_bredr_capable(hdev))
1634                 return;
1635
1636         memset(&cp, 0, sizeof(cp));
1637
1638         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1639                 cp.le = 0x01;
1640                 cp.simul = 0x00;
1641         }
1642
1643         if (cp.le != lmp_host_le_capable(hdev))
1644                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1645                             &cp);
1646 }
1647
1648 static void hci_set_event_mask_page_2(struct hci_request *req)
1649 {
1650         struct hci_dev *hdev = req->hdev;
1651         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1652
1653         /* If Connectionless Slave Broadcast master role is supported
1654          * enable all necessary events for it.
1655          */
1656         if (lmp_csb_master_capable(hdev)) {
1657                 events[1] |= 0x40;      /* Triggered Clock Capture */
1658                 events[1] |= 0x80;      /* Synchronization Train Complete */
1659                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1660                 events[2] |= 0x20;      /* CSB Channel Map Change */
1661         }
1662
1663         /* If Connectionless Slave Broadcast slave role is supported
1664          * enable all necessary events for it.
1665          */
1666         if (lmp_csb_slave_capable(hdev)) {
1667                 events[2] |= 0x01;      /* Synchronization Train Received */
1668                 events[2] |= 0x02;      /* CSB Receive */
1669                 events[2] |= 0x04;      /* CSB Timeout */
1670                 events[2] |= 0x08;      /* Truncated Page Complete */
1671         }
1672
1673         /* Enable Authenticated Payload Timeout Expired event if supported */
1674         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1675                 events[2] |= 0x80;
1676
1677         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1678 }
1679
1680 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1681 {
1682         struct hci_dev *hdev = req->hdev;
1683         u8 p;
1684
1685         hci_setup_event_mask(req);
1686
1687         /* Some Broadcom based Bluetooth controllers do not support the
1688          * Delete Stored Link Key command. They are clearly indicating its
1689          * absence in the bit mask of supported commands.
1690          *
1691          * Check the supported commands and only if the the command is marked
1692          * as supported send it. If not supported assume that the controller
1693          * does not have actual support for stored link keys which makes this
1694          * command redundant anyway.
1695          *
1696          * Some controllers indicate that they support deleting stored
1697          * link keys, but they actually don't. The quirk lets a driver
1698          * simply disable this command.
1699          */
1700         if (hdev->commands[6] & 0x80 &&
1701             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1702                 struct hci_cp_delete_stored_link_key cp;
1703
1704                 bacpy(&cp.bdaddr, BDADDR_ANY);
1705                 cp.delete_all = 0x01;
1706                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1707                             sizeof(cp), &cp);
1708         }
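
        /* The hdev->commands[] checks in this function index the Supported
         * Commands bit mask reported by the controller: octet n, bit m of
         * the mask corresponds to hdev->commands[n] & (1 << m). A
         * hypothetical helper making the mapping explicit (sketch only):
         *
         *	static inline bool hci_cmd_supported(struct hci_dev *hdev,
         *					     unsigned int octet,
         *					     unsigned int bit)
         *	{
         *		return hdev->commands[octet] & (1 << bit);
         *	}
         */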
1709
1710         if (hdev->commands[5] & 0x10)
1711                 hci_setup_link_policy(req);
1712
1713         if (hdev->commands[8] & 0x01)
1714                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1715
1716         /* Some older Broadcom based Bluetooth 1.2 controllers do not
1717          * support the Read Page Scan Type command. Check support for
1718          * this command in the bit mask of supported commands.
1719          */
1720         if (hdev->commands[13] & 0x01)
1721                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1722
1723         if (lmp_le_capable(hdev)) {
1724                 u8 events[8];
1725
1726                 memset(events, 0, sizeof(events));
1727                 events[0] = 0x0f;
1728
1729                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1730                         events[0] |= 0x10;      /* LE Long Term Key Request */
1731
1732                 /* If controller supports the Connection Parameters Request
1733                  * Link Layer Procedure, enable the corresponding event.
1734                  */
1735                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1736                         events[0] |= 0x20;      /* LE Remote Connection
1737                                                  * Parameter Request
1738                                                  */
1739
1740                 /* If the controller supports Extended Scanner Filter
1741                  * Policies, enable the corresponding event.
1742                  */
1743                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
1744                         events[1] |= 0x04;      /* LE Direct Advertising
1745                                                  * Report
1746                                                  */
1747
1748                 /* If the controller supports the LE Read Local P-256
1749                  * Public Key command, enable the corresponding event.
1750                  */
1751                 if (hdev->commands[34] & 0x02)
1752                         events[0] |= 0x80;      /* LE Read Local P-256
1753                                                  * Public Key Complete
1754                                                  */
1755
1756                 /* If the controller supports the LE Generate DHKey
1757                  * command, enable the corresponding event.
1758                  */
1759                 if (hdev->commands[34] & 0x04)
1760                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
1761
1762                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1763                             events);
1764
1765                 if (hdev->commands[25] & 0x40) {
1766                         /* Read LE Advertising Channel TX Power */
1767                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1768                 }
1769
1770                 hci_set_le_support(req);
1771         }
1772
1773         /* Read features beyond page 1 if available */
1774         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1775                 struct hci_cp_read_local_ext_features cp;
1776
1777                 cp.page = p;
1778                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1779                             sizeof(cp), &cp);
1780         }
1781 }
1782
1783 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1784 {
1785         struct hci_dev *hdev = req->hdev;
1786
1787         /* Set event mask page 2 if the HCI command for it is supported */
1788         if (hdev->commands[22] & 0x04)
1789                 hci_set_event_mask_page_2(req);
1790
1791         /* Read local codec list if the HCI command is supported */
1792         if (hdev->commands[29] & 0x20)
1793                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1794
1795         /* Get MWS transport configuration if the HCI command is supported */
1796         if (hdev->commands[30] & 0x08)
1797                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1798
1799         /* Check for Synchronization Train support */
1800         if (lmp_sync_train_capable(hdev))
1801                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1802
1803         /* Enable Secure Connections if supported and configured */
1804         if (bredr_sc_enabled(hdev)) {
1805                 u8 support = 0x01;
1806                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1807                             sizeof(support), &support);
1808         }
1809 }
1810
1811 static int __hci_init(struct hci_dev *hdev)
1812 {
1813         int err;
1814
1815         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1816         if (err < 0)
1817                 return err;
1818
1819         /* The Device Under Test (DUT) mode is special and available for
1820          * all controller types. So just create it early on.
1821          */
1822         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1823                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1824                                     &dut_mode_fops);
1825         }
1826
1827         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1828          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1829          * first stage init.
1830          */
1831         if (hdev->dev_type != HCI_BREDR)
1832                 return 0;
1833
1834         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1835         if (err < 0)
1836                 return err;
1837
1838         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1839         if (err < 0)
1840                 return err;
1841
1842         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1843         if (err < 0)
1844                 return err;
1845
1846         /* Only create debugfs entries during the initial setup
1847          * phase and not every time the controller gets powered on.
1848          */
1849         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1850                 return 0;
1851
1852         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1853                             &features_fops);
1854         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1855                            &hdev->manufacturer);
1856         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1857         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1858         debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1859                             &device_list_fops);
1860         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1861                             &blacklist_fops);
1862         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1863
1864         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1865                             &conn_info_min_age_fops);
1866         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1867                             &conn_info_max_age_fops);
1868
1869         hci_debugfs_create_common(hdev);
1870
1871         if (lmp_bredr_capable(hdev)) {
1872                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1873                                     hdev, &inquiry_cache_fops);
1874                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1875                                     hdev, &link_keys_fops);
1876                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1877                                     hdev, &dev_class_fops);
1878                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1879                                     hdev, &voice_setting_fops);
1880
1881                 hci_debugfs_create_bredr(hdev);
1882         }
1883
1884         if (lmp_ssp_capable(hdev)) {
1885                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1886                                     hdev, &auto_accept_delay_fops);
1887                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1888                                     hdev, &force_sc_support_fops);
1889                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1890                                     hdev, &sc_only_mode_fops);
1891                 if (lmp_le_capable(hdev))
1892                         debugfs_create_file("force_lesc_support", 0644,
1893                                             hdev->debugfs, hdev,
1894                                             &force_lesc_support_fops);
1895         }
1896
1897         if (lmp_sniff_capable(hdev)) {
1898                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1899                                     hdev, &idle_timeout_fops);
1900                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1901                                     hdev, &sniff_min_interval_fops);
1902                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1903                                     hdev, &sniff_max_interval_fops);
1904         }
1905
1906         if (lmp_le_capable(hdev)) {
1907                 debugfs_create_file("identity", 0400, hdev->debugfs,
1908                                     hdev, &identity_fops);
1909                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1910                                     hdev, &rpa_timeout_fops);
1911                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1912                                     hdev, &random_address_fops);
1913                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1914                                     hdev, &static_address_fops);
1915
1916                 /* For controllers with a public address, provide a debug
1917                  * option to force the usage of the configured static
1918                  * address. By default the public address is used.
1919                  */
1920                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1921                         debugfs_create_file("force_static_address", 0644,
1922                                             hdev->debugfs, hdev,
1923                                             &force_static_address_fops);
1924
1925                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1926                                   &hdev->le_white_list_size);
1927                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1928                                     &white_list_fops);
1929                 debugfs_create_file("identity_resolving_keys", 0400,
1930                                     hdev->debugfs, hdev,
1931                                     &identity_resolving_keys_fops);
1932                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1933                                     hdev, &long_term_keys_fops);
1934                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1935                                     hdev, &conn_min_interval_fops);
1936                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1937                                     hdev, &conn_max_interval_fops);
1938                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1939                                     hdev, &conn_latency_fops);
1940                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1941                                     hdev, &supervision_timeout_fops);
1942                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1943                                     hdev, &adv_channel_map_fops);
1944                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1945                                     hdev, &adv_min_interval_fops);
1946                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1947                                     hdev, &adv_max_interval_fops);
1948                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1949                                    hdev->debugfs,
1950                                    &hdev->discov_interleaved_timeout);
1951
1952                 hci_debugfs_create_le(hdev);
1953
1954                 smp_register(hdev);
1955         }
1956
1957         return 0;
1958 }
1959
1960 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1961 {
1962         struct hci_dev *hdev = req->hdev;
1963
1964         BT_DBG("%s %ld", hdev->name, opt);
1965
1966         /* Reset */
1967         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1968                 hci_reset_req(req, 0);
1969
1970         /* Read Local Version */
1971         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1972
1973         /* Read BD Address */
1974         if (hdev->set_bdaddr)
1975                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1976 }
1977
1978 static int __hci_unconf_init(struct hci_dev *hdev)
1979 {
1980         int err;
1981
1982         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1983                 return 0;
1984
1985         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1986         if (err < 0)
1987                 return err;
1988
1989         return 0;
1990 }
1991
1992 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1993 {
1994         __u8 scan = opt;
1995
1996         BT_DBG("%s %x", req->hdev->name, scan);
1997
1998         /* Inquiry and Page scans */
1999         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2000 }
2001
2002 static void hci_auth_req(struct hci_request *req, unsigned long opt)
2003 {
2004         __u8 auth = opt;
2005
2006         BT_DBG("%s %x", req->hdev->name, auth);
2007
2008         /* Authentication */
2009         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
2010 }
2011
2012 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
2013 {
2014         __u8 encrypt = opt;
2015
2016         BT_DBG("%s %x", req->hdev->name, encrypt);
2017
2018         /* Encryption */
2019         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
2020 }
2021
2022 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
2023 {
2024         __le16 policy = cpu_to_le16(opt);
2025
2026         BT_DBG("%s %x", req->hdev->name, policy);
2027
2028         /* Default link policy */
2029         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
2030 }
2031
2032 /* Get HCI device by index.
2033  * Device is held on return. */
2034 struct hci_dev *hci_dev_get(int index)
2035 {
2036         struct hci_dev *hdev = NULL, *d;
2037
2038         BT_DBG("%d", index);
2039
2040         if (index < 0)
2041                 return NULL;
2042
2043         read_lock(&hci_dev_list_lock);
2044         list_for_each_entry(d, &hci_dev_list, list) {
2045                 if (d->id == index) {
2046                         hdev = hci_dev_hold(d);
2047                         break;
2048                 }
2049         }
2050         read_unlock(&hci_dev_list_lock);
2051         return hdev;
2052 }
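
/* A rough sketch of the expected usage pattern: every successful
 * hci_dev_get() must be balanced by hci_dev_put() once the caller is
 * done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */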
2053
2054 /* ---- Inquiry support ---- */
2055
2056 bool hci_discovery_active(struct hci_dev *hdev)
2057 {
2058         struct discovery_state *discov = &hdev->discovery;
2059
2060         switch (discov->state) {
2061         case DISCOVERY_FINDING:
2062         case DISCOVERY_RESOLVING:
2063                 return true;
2064
2065         default:
2066                 return false;
2067         }
2068 }
2069
2070 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2071 {
2072         int old_state = hdev->discovery.state;
2073
2074         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2075
2076         if (old_state == state)
2077                 return;
2078
2079         hdev->discovery.state = state;
2080
2081         switch (state) {
2082         case DISCOVERY_STOPPED:
2083                 hci_update_background_scan(hdev);
2084
2085                 if (old_state != DISCOVERY_STARTING)
2086                         mgmt_discovering(hdev, 0);
2087                 break;
2088         case DISCOVERY_STARTING:
2089                 break;
2090         case DISCOVERY_FINDING:
2091                 mgmt_discovering(hdev, 1);
2092                 break;
2093         case DISCOVERY_RESOLVING:
2094                 break;
2095         case DISCOVERY_STOPPING:
2096                 break;
2097         }
2098 }
2099
2100 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2101 {
2102         struct discovery_state *cache = &hdev->discovery;
2103         struct inquiry_entry *p, *n;
2104
2105         list_for_each_entry_safe(p, n, &cache->all, all) {
2106                 list_del(&p->all);
2107                 kfree(p);
2108         }
2109
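        /* Every entry is linked on cache->all and may additionally sit on
         * the unknown or resolve list through its second list_head, so
         * freeing the all list and re-initialising the other two heads is
         * sufficient.
         */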
2110         INIT_LIST_HEAD(&cache->unknown);
2111         INIT_LIST_HEAD(&cache->resolve);
2112 }
2113
2114 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2115                                                bdaddr_t *bdaddr)
2116 {
2117         struct discovery_state *cache = &hdev->discovery;
2118         struct inquiry_entry *e;
2119
2120         BT_DBG("cache %p, %pMR", cache, bdaddr);
2121
2122         list_for_each_entry(e, &cache->all, all) {
2123                 if (!bacmp(&e->data.bdaddr, bdaddr))
2124                         return e;
2125         }
2126
2127         return NULL;
2128 }
2129
2130 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2131                                                        bdaddr_t *bdaddr)
2132 {
2133         struct discovery_state *cache = &hdev->discovery;
2134         struct inquiry_entry *e;
2135
2136         BT_DBG("cache %p, %pMR", cache, bdaddr);
2137
2138         list_for_each_entry(e, &cache->unknown, list) {
2139                 if (!bacmp(&e->data.bdaddr, bdaddr))
2140                         return e;
2141         }
2142
2143         return NULL;
2144 }
2145
2146 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2147                                                        bdaddr_t *bdaddr,
2148                                                        int state)
2149 {
2150         struct discovery_state *cache = &hdev->discovery;
2151         struct inquiry_entry *e;
2152
2153         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2154
2155         list_for_each_entry(e, &cache->resolve, list) {
2156                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2157                         return e;
2158                 if (!bacmp(&e->data.bdaddr, bdaddr))
2159                         return e;
2160         }
2161
2162         return NULL;
2163 }
2164
2165 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2166                                       struct inquiry_entry *ie)
2167 {
2168         struct discovery_state *cache = &hdev->discovery;
2169         struct list_head *pos = &cache->resolve;
2170         struct inquiry_entry *p;
2171
2172         list_del(&ie->list);
2173
2174         list_for_each_entry(p, &cache->resolve, list) {
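        /* Keep NAME_PENDING entries at the front and order the rest by
         * signal strength: RSSI is in negative dBm, so a smaller abs(rssi)
         * means a stronger signal. With existing entries at -40 and -70
         * dBm, for example, a new -55 dBm entry is inserted between them.
         */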
2175                 if (p->name_state != NAME_PENDING &&
2176                     abs(p->data.rssi) >= abs(ie->data.rssi))
2177                         break;
2178                 pos = &p->list;
2179         }
2180
2181         list_add(&ie->list, pos);
2182 }
2183
2184 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2185                              bool name_known)
2186 {
2187         struct discovery_state *cache = &hdev->discovery;
2188         struct inquiry_entry *ie;
2189         u32 flags = 0;
2190
2191         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2192
2193         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2194
2195         if (!data->ssp_mode)
2196                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2197
2198         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2199         if (ie) {
2200                 if (!ie->data.ssp_mode)
2201                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2202
2203                 if (ie->name_state == NAME_NEEDED &&
2204                     data->rssi != ie->data.rssi) {
2205                         ie->data.rssi = data->rssi;
2206                         hci_inquiry_cache_update_resolve(hdev, ie);
2207                 }
2208
2209                 goto update;
2210         }
2211
2212         /* Entry not in the cache. Add new one. */
2213         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2214         if (!ie) {
2215                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2216                 goto done;
2217         }
2218
2219         list_add(&ie->all, &cache->all);
2220
2221         if (name_known) {
2222                 ie->name_state = NAME_KNOWN;
2223         } else {
2224                 ie->name_state = NAME_NOT_KNOWN;
2225                 list_add(&ie->list, &cache->unknown);
2226         }
2227
2228 update:
2229         if (name_known && ie->name_state != NAME_KNOWN &&
2230             ie->name_state != NAME_PENDING) {
2231                 ie->name_state = NAME_KNOWN;
2232                 list_del(&ie->list);
2233         }
2234
2235         memcpy(&ie->data, data, sizeof(*data));
2236         ie->timestamp = jiffies;
2237         cache->timestamp = jiffies;
2238
2239         if (ie->name_state == NAME_NOT_KNOWN)
2240                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2241
2242 done:
2243         return flags;
2244 }
2245
2246 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2247 {
2248         struct discovery_state *cache = &hdev->discovery;
2249         struct inquiry_info *info = (struct inquiry_info *) buf;
2250         struct inquiry_entry *e;
2251         int copied = 0;
2252
2253         list_for_each_entry(e, &cache->all, all) {
2254                 struct inquiry_data *data = &e->data;
2255
2256                 if (copied >= num)
2257                         break;
2258
2259                 bacpy(&info->bdaddr, &data->bdaddr);
2260                 info->pscan_rep_mode    = data->pscan_rep_mode;
2261                 info->pscan_period_mode = data->pscan_period_mode;
2262                 info->pscan_mode        = data->pscan_mode;
2263                 memcpy(info->dev_class, data->dev_class, 3);
2264                 info->clock_offset      = data->clock_offset;
2265
2266                 info++;
2267                 copied++;
2268         }
2269
2270         BT_DBG("cache %p, copied %d", cache, copied);
2271         return copied;
2272 }
2273
2274 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2275 {
2276         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2277         struct hci_dev *hdev = req->hdev;
2278         struct hci_cp_inquiry cp;
2279
2280         BT_DBG("%s", hdev->name);
2281
2282         if (test_bit(HCI_INQUIRY, &hdev->flags))
2283                 return;
2284
2285         /* Start Inquiry */
2286         memcpy(&cp.lap, &ir->lap, 3);
2287         cp.length  = ir->length;
2288         cp.num_rsp = ir->num_rsp;
2289         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2290 }
2291
2292 int hci_inquiry(void __user *arg)
2293 {
2294         __u8 __user *ptr = arg;
2295         struct hci_inquiry_req ir;
2296         struct hci_dev *hdev;
2297         int err = 0, do_inquiry = 0, max_rsp;
2298         long timeo;
2299         __u8 *buf;
2300
2301         if (copy_from_user(&ir, ptr, sizeof(ir)))
2302                 return -EFAULT;
2303
2304         hdev = hci_dev_get(ir.dev_id);
2305         if (!hdev)
2306                 return -ENODEV;
2307
2308         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2309                 err = -EBUSY;
2310                 goto done;
2311         }
2312
2313         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2314                 err = -EOPNOTSUPP;
2315                 goto done;
2316         }
2317
2318         if (hdev->dev_type != HCI_BREDR) {
2319                 err = -EOPNOTSUPP;
2320                 goto done;
2321         }
2322
2323         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2324                 err = -EOPNOTSUPP;
2325                 goto done;
2326         }
2327
2328         hci_dev_lock(hdev);
2329         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2330             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2331                 hci_inquiry_cache_flush(hdev);
2332                 do_inquiry = 1;
2333         }
2334         hci_dev_unlock(hdev);
2335
2336         timeo = ir.length * msecs_to_jiffies(2000);
2337
2338         if (do_inquiry) {
2339                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2340                                    timeo);
2341                 if (err < 0)
2342                         goto done;
2343
2344                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2345                  * cleared). If it is interrupted by a signal, return -EINTR.
2346                  */
2347                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2348                                 TASK_INTERRUPTIBLE))
2349                         return -EINTR;
2350         }
2351
2352         /* For an unlimited number of responses, use a buffer with
2353          * 255 entries.
2354          */
2355         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2356
2357         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2358          * buffer and then copy it to user space.
2359          */
2360         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2361         if (!buf) {
2362                 err = -ENOMEM;
2363                 goto done;
2364         }
2365
2366         hci_dev_lock(hdev);
2367         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2368         hci_dev_unlock(hdev);
2369
2370         BT_DBG("num_rsp %d", ir.num_rsp);
2371
2372         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2373                 ptr += sizeof(ir);
2374                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2375                                  ir.num_rsp))
2376                         err = -EFAULT;
2377         } else
2378                 err = -EFAULT;
2379
2380         kfree(buf);
2381
2382 done:
2383         hci_dev_put(hdev);
2384         return err;
2385 }
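
/* This function is reached from user space via the HCIINQUIRY ioctl on
 * a raw HCI socket. A rough caller sketch, assuming the user-space
 * BlueZ <bluetooth/bluetooth.h> and <bluetooth/hci.h> definitions:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} req = { 0 };
 *
 *	req.ir.dev_id  = 0;
 *	req.ir.length  = 8;	(inquiry window, units of 1.28 s)
 *	req.ir.num_rsp = 255;
 *	memcpy(req.ir.lap, (uint8_t[]){ 0x33, 0x8b, 0x9e }, 3);	(GIAC)
 *	ioctl(sock, HCIINQUIRY, &req);
 */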
2386
2387 static int hci_dev_do_open(struct hci_dev *hdev)
2388 {
2389         int ret = 0;
2390
2391         BT_DBG("%s %p", hdev->name, hdev);
2392
2393         hci_req_lock(hdev);
2394
2395         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2396                 ret = -ENODEV;
2397                 goto done;
2398         }
2399
2400         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2401             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2402                 /* Check for rfkill but allow the HCI setup stage to
2403                  * proceed (which in itself doesn't cause any RF activity).
2404                  */
2405                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2406                         ret = -ERFKILL;
2407                         goto done;
2408                 }
2409
2410                 /* Check for valid public address or a configured static
2411          * random address, but let the HCI setup proceed to
2412                  * be able to determine if there is a public address
2413                  * or not.
2414                  *
2415                  * In case of user channel usage, it is not important
2416                  * if a public address or static random address is
2417                  * available.
2418                  *
2419                  * This check is only valid for BR/EDR controllers
2420                  * since AMP controllers do not have an address.
2421                  */
2422                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2423                     hdev->dev_type == HCI_BREDR &&
2424                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2425                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2426                         ret = -EADDRNOTAVAIL;
2427                         goto done;
2428                 }
2429         }
2430
2431         if (test_bit(HCI_UP, &hdev->flags)) {
2432                 ret = -EALREADY;
2433                 goto done;
2434         }
2435
2436         if (hdev->open(hdev)) {
2437                 ret = -EIO;
2438                 goto done;
2439         }
2440
2441         atomic_set(&hdev->cmd_cnt, 1);
2442         set_bit(HCI_INIT, &hdev->flags);
2443
2444         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2445                 if (hdev->setup)
2446                         ret = hdev->setup(hdev);
2447
2448                 /* The transport driver can set these quirks before
2449                  * creating the HCI device or in its setup callback.
2450                  *
2451                  * In case any of them is set, the controller has to
2452                  * start up as unconfigured.
2453                  */
2454                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2455                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2456                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2457
2458                 /* For an unconfigured controller it is required to
2459                  * read at least the version information provided by
2460                  * the Read Local Version Information command.
2461                  *
2462                  * If the set_bdaddr driver callback is provided, then
2463                  * also the original Bluetooth public device address
2464                  * will be read using the Read BD Address command.
2465                  */
2466                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2467                         ret = __hci_unconf_init(hdev);
2468         }
2469
2470         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2471                 /* If public address change is configured, ensure that
2472                  * the address gets programmed. If the driver does not
2473                  * support changing the public address, fail the power
2474                  * on procedure.
2475                  */
2476                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2477                     hdev->set_bdaddr)
2478                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2479                 else
2480                         ret = -EADDRNOTAVAIL;
2481         }
2482
2483         if (!ret) {
2484                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2485                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2486                         ret = __hci_init(hdev);
2487         }
2488
2489         clear_bit(HCI_INIT, &hdev->flags);
2490
2491         if (!ret) {
2492                 hci_dev_hold(hdev);
2493                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2494                 set_bit(HCI_UP, &hdev->flags);
2495                 hci_notify(hdev, HCI_DEV_UP);
2496                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2497                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2498                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2499                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2500                     hdev->dev_type == HCI_BREDR) {
2501                         hci_dev_lock(hdev);
2502                         mgmt_powered(hdev, 1);
2503                         hci_dev_unlock(hdev);
2504                 }
2505         } else {
2506                 /* Init failed, cleanup */
2507                 flush_work(&hdev->tx_work);
2508                 flush_work(&hdev->cmd_work);
2509                 flush_work(&hdev->rx_work);
2510
2511                 skb_queue_purge(&hdev->cmd_q);
2512                 skb_queue_purge(&hdev->rx_q);
2513
2514                 if (hdev->flush)
2515                         hdev->flush(hdev);
2516
2517                 if (hdev->sent_cmd) {
2518                         kfree_skb(hdev->sent_cmd);
2519                         hdev->sent_cmd = NULL;
2520                 }
2521
2522                 hdev->close(hdev);
2523                 hdev->flags &= BIT(HCI_RAW);
2524         }
2525
2526 done:
2527         hci_req_unlock(hdev);
2528         return ret;
2529 }
2530
2531 /* ---- HCI ioctl helpers ---- */
2532
2533 int hci_dev_open(__u16 dev)
2534 {
2535         struct hci_dev *hdev;
2536         int err;
2537
2538         hdev = hci_dev_get(dev);
2539         if (!hdev)
2540                 return -ENODEV;
2541
2542         /* Devices that are marked as unconfigured can only be powered
2543          * up as user channel. Trying to bring them up as normal devices
2544          * will result in a failure. Only user channel operation is
2545          * possible.
2546          *
2547          * When this function is called for a user channel, the flag
2548          * HCI_USER_CHANNEL will be set first before attempting to
2549          * open the device.
2550          */
2551         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2552             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2553                 err = -EOPNOTSUPP;
2554                 goto done;
2555         }
2556
2557         /* We need to ensure that no other power on/off work is pending
2558          * before proceeding to call hci_dev_do_open. This is
2559          * particularly important if the setup procedure has not yet
2560          * completed.
2561          */
2562         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2563                 cancel_delayed_work(&hdev->power_off);
2564
2565         /* After this call it is guaranteed that the setup procedure
2566          * has finished. This means that error conditions like RFKILL
2567          * or no valid public or static random address apply.
2568          */
2569         flush_workqueue(hdev->req_workqueue);
2570
2571         /* For controllers not using the management interface and that
2572          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2573          * so that pairing works for them. Once the management interface
2574          * is in use this bit will be cleared again and userspace has
2575          * to explicitly enable it.
2576          */
2577         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2578             !test_bit(HCI_MGMT, &hdev->dev_flags))
2579                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2580
2581         err = hci_dev_do_open(hdev);
2582
2583 done:
2584         hci_dev_put(hdev);
2585         return err;
2586 }
2587
2588 /* This function requires the caller holds hdev->lock */
2589 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2590 {
2591         struct hci_conn_params *p;
2592
2593         list_for_each_entry(p, &hdev->le_conn_params, list) {
2594                 if (p->conn) {
2595                         hci_conn_drop(p->conn);
2596                         hci_conn_put(p->conn);
2597                         p->conn = NULL;
2598                 }
2599                 list_del_init(&p->action);
2600         }
2601
2602         BT_DBG("All LE pending actions cleared");
2603 }
2604
2605 static int hci_dev_do_close(struct hci_dev *hdev)
2606 {
2607         BT_DBG("%s %p", hdev->name, hdev);
2608
2609         cancel_delayed_work(&hdev->power_off);
2610
2611         hci_req_cancel(hdev, ENODEV);
2612         hci_req_lock(hdev);
2613
2614         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2615                 cancel_delayed_work_sync(&hdev->cmd_timer);
2616                 hci_req_unlock(hdev);
2617                 return 0;
2618         }
2619
2620         /* Flush RX and TX works */
2621         flush_work(&hdev->tx_work);
2622         flush_work(&hdev->rx_work);
2623
2624         if (hdev->discov_timeout > 0) {
2625                 cancel_delayed_work(&hdev->discov_off);
2626                 hdev->discov_timeout = 0;
2627                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2628                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2629         }
2630
2631         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2632                 cancel_delayed_work(&hdev->service_cache);
2633
2634         cancel_delayed_work_sync(&hdev->le_scan_disable);
2635
2636         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2637                 cancel_delayed_work_sync(&hdev->rpa_expired);
2638
2639         /* Avoid potential lockdep warnings from the *_flush() calls by
2640          * ensuring the workqueue is empty up front.
2641          */
2642         drain_workqueue(hdev->workqueue);
2643
2644         hci_dev_lock(hdev);
2645
2646         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2647                 if (hdev->dev_type == HCI_BREDR)
2648                         mgmt_powered(hdev, 0);
2649         }
2650
2651         hci_inquiry_cache_flush(hdev);
2652         hci_pend_le_actions_clear(hdev);
2653         hci_conn_hash_flush(hdev);
2654         hci_dev_unlock(hdev);
2655
2656         hci_notify(hdev, HCI_DEV_DOWN);
2657
2658         if (hdev->flush)
2659                 hdev->flush(hdev);
2660
2661         /* Reset device */
2662         skb_queue_purge(&hdev->cmd_q);
2663         atomic_set(&hdev->cmd_cnt, 1);
2664         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2665             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2666             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2667                 set_bit(HCI_INIT, &hdev->flags);
2668                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2669                 clear_bit(HCI_INIT, &hdev->flags);
2670         }
2671
2672         /* Flush cmd work */
2673         flush_work(&hdev->cmd_work);
2674
2675         /* Drop queues */
2676         skb_queue_purge(&hdev->rx_q);
2677         skb_queue_purge(&hdev->cmd_q);
2678         skb_queue_purge(&hdev->raw_q);
2679
2680         /* Drop last sent command */
2681         if (hdev->sent_cmd) {
2682                 cancel_delayed_work_sync(&hdev->cmd_timer);
2683                 kfree_skb(hdev->sent_cmd);
2684                 hdev->sent_cmd = NULL;
2685         }
2686
2687         kfree_skb(hdev->recv_evt);
2688         hdev->recv_evt = NULL;
2689
2690         /* After this point our queues are empty
2691          * and no tasks are scheduled. */
2692         hdev->close(hdev);
2693
2694         /* Clear flags */
2695         hdev->flags &= BIT(HCI_RAW);
2696         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2697
2698         /* Controller radio is available but is currently powered down */
2699         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2700
2701         memset(hdev->eir, 0, sizeof(hdev->eir));
2702         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2703         bacpy(&hdev->random_addr, BDADDR_ANY);
2704
2705         hci_req_unlock(hdev);
2706
2707         hci_dev_put(hdev);
2708         return 0;
2709 }
2710
2711 int hci_dev_close(__u16 dev)
2712 {
2713         struct hci_dev *hdev;
2714         int err;
2715
2716         hdev = hci_dev_get(dev);
2717         if (!hdev)
2718                 return -ENODEV;
2719
2720         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2721                 err = -EBUSY;
2722                 goto done;
2723         }
2724
2725         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2726                 cancel_delayed_work(&hdev->power_off);
2727
2728         err = hci_dev_do_close(hdev);
2729
2730 done:
2731         hci_dev_put(hdev);
2732         return err;
2733 }
2734
2735 int hci_dev_reset(__u16 dev)
2736 {
2737         struct hci_dev *hdev;
2738         int ret = 0;
2739
2740         hdev = hci_dev_get(dev);
2741         if (!hdev)
2742                 return -ENODEV;
2743
2744         hci_req_lock(hdev);
2745
2746         if (!test_bit(HCI_UP, &hdev->flags)) {
2747                 ret = -ENETDOWN;
2748                 goto done;
2749         }
2750
2751         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2752                 ret = -EBUSY;
2753                 goto done;
2754         }
2755
2756         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2757                 ret = -EOPNOTSUPP;
2758                 goto done;
2759         }
2760
2761         /* Drop queues */
2762         skb_queue_purge(&hdev->rx_q);
2763         skb_queue_purge(&hdev->cmd_q);
2764
2765         /* Avoid potential lockdep warnings from the *_flush() calls by
2766          * ensuring the workqueue is empty up front.
2767          */
2768         drain_workqueue(hdev->workqueue);
2769
2770         hci_dev_lock(hdev);
2771         hci_inquiry_cache_flush(hdev);
2772         hci_conn_hash_flush(hdev);
2773         hci_dev_unlock(hdev);
2774
2775         if (hdev->flush)
2776                 hdev->flush(hdev);
2777
2778         atomic_set(&hdev->cmd_cnt, 1);
2779         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2780
2781         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2782
2783 done:
2784         hci_req_unlock(hdev);
2785         hci_dev_put(hdev);
2786         return ret;
2787 }
2788
2789 int hci_dev_reset_stat(__u16 dev)
2790 {
2791         struct hci_dev *hdev;
2792         int ret = 0;
2793
2794         hdev = hci_dev_get(dev);
2795         if (!hdev)
2796                 return -ENODEV;
2797
2798         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2799                 ret = -EBUSY;
2800                 goto done;
2801         }
2802
2803         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2804                 ret = -EOPNOTSUPP;
2805                 goto done;
2806         }
2807
2808         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2809
2810 done:
2811         hci_dev_put(hdev);
2812         return ret;
2813 }
2814
2815 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2816 {
2817         bool conn_changed, discov_changed;
2818
2819         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2820
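        /* scan is the Write Scan Enable value: SCAN_PAGE (0x02) drives
         * HCI_CONNECTABLE and SCAN_INQUIRY (0x01) drives HCI_DISCOVERABLE.
         * For example, scan = SCAN_PAGE | SCAN_INQUIRY (0x03) sets both
         * flags and scan = SCAN_DISABLED (0x00) clears both.
         */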
2821         if ((scan & SCAN_PAGE))
2822                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2823                                                  &hdev->dev_flags);
2824         else
2825                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2826                                                   &hdev->dev_flags);
2827
2828         if ((scan & SCAN_INQUIRY)) {
2829                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2830                                                    &hdev->dev_flags);
2831         } else {
2832                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2833                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2834                                                     &hdev->dev_flags);
2835         }
2836
2837         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2838                 return;
2839
2840         if (conn_changed || discov_changed) {
2841                 /* In case this was disabled through mgmt */
2842                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2843
2844                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2845                         mgmt_update_adv_data(hdev);
2846
2847                 mgmt_new_settings(hdev);
2848         }
2849 }
2850
2851 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2852 {
2853         struct hci_dev *hdev;
2854         struct hci_dev_req dr;
2855         int err = 0;
2856
2857         if (copy_from_user(&dr, arg, sizeof(dr)))
2858                 return -EFAULT;
2859
2860         hdev = hci_dev_get(dr.dev_id);
2861         if (!hdev)
2862                 return -ENODEV;
2863
2864         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2865                 err = -EBUSY;
2866                 goto done;
2867         }
2868
2869         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2870                 err = -EOPNOTSUPP;
2871                 goto done;
2872         }
2873
2874         if (hdev->dev_type != HCI_BREDR) {
2875                 err = -EOPNOTSUPP;
2876                 goto done;
2877         }
2878
2879         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2880                 err = -EOPNOTSUPP;
2881                 goto done;
2882         }
2883
2884         switch (cmd) {
2885         case HCISETAUTH:
2886                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2887                                    HCI_INIT_TIMEOUT);
2888                 break;
2889
2890         case HCISETENCRYPT:
2891                 if (!lmp_encrypt_capable(hdev)) {
2892                         err = -EOPNOTSUPP;
2893                         break;
2894                 }
2895
2896                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2897                         /* Auth must be enabled first */
2898                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2899                                            HCI_INIT_TIMEOUT);
2900                         if (err)
2901                                 break;
2902                 }
2903
2904                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2905                                    HCI_INIT_TIMEOUT);
2906                 break;
2907
2908         case HCISETSCAN:
2909                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2910                                    HCI_INIT_TIMEOUT);
2911
2912                 /* Ensure that the connectable and discoverable states
2913                  * get correctly modified as this was a non-mgmt change.
2914                  */
2915                 if (!err)
2916                         hci_update_scan_state(hdev, dr.dev_opt);
2917                 break;
2918
2919         case HCISETLINKPOL:
2920                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2921                                    HCI_INIT_TIMEOUT);
2922                 break;
2923
2924         case HCISETLINKMODE:
2925                 hdev->link_mode = ((__u16) dr.dev_opt) &
2926                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2927                 break;
2928
2929         case HCISETPTYPE:
2930                 hdev->pkt_type = (__u16) dr.dev_opt;
2931                 break;
2932
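        /* For the two MTU commands below, dev_opt packs a pair of 16-bit
         * values: the packet count in the first __u16 and the MTU in the
         * second, i.e. dev_opt = (mtu << 16) | pkts on a little-endian
         * system.
         */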
2933         case HCISETACLMTU:
2934                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2935                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2936                 break;
2937
2938         case HCISETSCOMTU:
2939                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2940                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2941                 break;
2942
2943         default:
2944                 err = -EINVAL;
2945                 break;
2946         }
2947
2948 done:
2949         hci_dev_put(hdev);
2950         return err;
2951 }
2952
2953 int hci_get_dev_list(void __user *arg)
2954 {
2955         struct hci_dev *hdev;
2956         struct hci_dev_list_req *dl;
2957         struct hci_dev_req *dr;
2958         int n = 0, size, err;
2959         __u16 dev_num;
2960
2961         if (get_user(dev_num, (__u16 __user *) arg))
2962                 return -EFAULT;
2963
2964         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2965                 return -EINVAL;
2966
2967         size = sizeof(*dl) + dev_num * sizeof(*dr);
2968
2969         dl = kzalloc(size, GFP_KERNEL);
2970         if (!dl)
2971                 return -ENOMEM;
2972
2973         dr = dl->dev_req;
2974
2975         read_lock(&hci_dev_list_lock);
2976         list_for_each_entry(hdev, &hci_dev_list, list) {
2977                 unsigned long flags = hdev->flags;
2978
2979                 /* When auto-off is configured, the transport is still
2980                  * running, but the device should nevertheless be
2981                  * reported as down.
2982                  */
2983                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2984                         flags &= ~BIT(HCI_UP);
2985
2986                 (dr + n)->dev_id  = hdev->id;
2987                 (dr + n)->dev_opt = flags;
2988
2989                 if (++n >= dev_num)
2990                         break;
2991         }
2992         read_unlock(&hci_dev_list_lock);
2993
2994         dl->dev_num = n;
2995         size = sizeof(*dl) + n * sizeof(*dr);
2996
2997         err = copy_to_user(arg, dl, size);
2998         kfree(dl);
2999
3000         return err ? -EFAULT : 0;
3001 }
3002
3003 int hci_get_dev_info(void __user *arg)
3004 {
3005         struct hci_dev *hdev;
3006         struct hci_dev_info di;
3007         unsigned long flags;
3008         int err = 0;
3009
3010         if (copy_from_user(&di, arg, sizeof(di)))
3011                 return -EFAULT;
3012
3013         hdev = hci_dev_get(di.dev_id);
3014         if (!hdev)
3015                 return -ENODEV;
3016
3017         /* When auto-off is configured, the transport is still
3018          * running, but the device should nevertheless be
3019          * reported as down.
3020          */
3021         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3022                 flags = hdev->flags & ~BIT(HCI_UP);
3023         else
3024                 flags = hdev->flags;
3025
3026         strcpy(di.name, hdev->name);
3027         di.bdaddr   = hdev->bdaddr;
3028         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
3029         di.flags    = flags;
3030         di.pkt_type = hdev->pkt_type;
3031         if (lmp_bredr_capable(hdev)) {
3032                 di.acl_mtu  = hdev->acl_mtu;
3033                 di.acl_pkts = hdev->acl_pkts;
3034                 di.sco_mtu  = hdev->sco_mtu;
3035                 di.sco_pkts = hdev->sco_pkts;
3036         } else {
3037                 di.acl_mtu  = hdev->le_mtu;
3038                 di.acl_pkts = hdev->le_pkts;
3039                 di.sco_mtu  = 0;
3040                 di.sco_pkts = 0;
3041         }
3042         di.link_policy = hdev->link_policy;
3043         di.link_mode   = hdev->link_mode;
3044
3045         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3046         memcpy(&di.features, &hdev->features, sizeof(di.features));
3047
3048         if (copy_to_user(arg, &di, sizeof(di)))
3049                 err = -EFAULT;
3050
3051         hci_dev_put(hdev);
3052
3053         return err;
3054 }
3055
3056 /* ---- Interface to HCI drivers ---- */
3057
3058 static int hci_rfkill_set_block(void *data, bool blocked)
3059 {
3060         struct hci_dev *hdev = data;
3061
3062         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3063
3064         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3065                 return -EBUSY;
3066
3067         if (blocked) {
3068                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3069                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3070                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3071                         hci_dev_do_close(hdev);
3072         } else {
3073                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3074         }
3075
3076         return 0;
3077 }
3078
3079 static const struct rfkill_ops hci_rfkill_ops = {
3080         .set_block = hci_rfkill_set_block,
3081 };
3082
3083 static void hci_power_on(struct work_struct *work)
3084 {
3085         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3086         int err;
3087
3088         BT_DBG("%s", hdev->name);
3089
3090         err = hci_dev_do_open(hdev);
3091         if (err < 0) {
3092                 hci_dev_lock(hdev);
3093                 mgmt_set_powered_failed(hdev, err);
3094                 hci_dev_unlock(hdev);
3095                 return;
3096         }
3097
3098         /* During the HCI setup phase, a few error conditions are
3099          * ignored and they need to be checked now. If they are still
3100          * valid, it is important to turn the device back off.
3101          */
3102         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3103             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3104             (hdev->dev_type == HCI_BREDR &&
3105              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3106              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3107                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3108                 hci_dev_do_close(hdev);
3109         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3110                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3111                                    HCI_AUTO_OFF_TIMEOUT);
3112         }
3113
3114         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3115                 /* For unconfigured devices, set the HCI_RAW flag
3116                  * so that userspace can easily identify them.
3117                  */
3118                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3119                         set_bit(HCI_RAW, &hdev->flags);
3120
3121                 /* For fully configured devices, this will send
3122                  * the Index Added event. For unconfigured devices,
3123          * it will send the Unconfigured Index Added event.
3124                  *
3125                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3126          * and no event will be sent.
3127                  */
3128                 mgmt_index_added(hdev);
3129         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3130                 /* Now that the controller is configured, it is
3131                  * important to clear the HCI_RAW flag.
3132                  */
3133                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3134                         clear_bit(HCI_RAW, &hdev->flags);
3135
3136                 /* Powering on the controller with HCI_CONFIG set only
3137                  * happens with the transition from unconfigured to
3138                  * configured. This will send the Index Added event.
3139                  */
3140                 mgmt_index_added(hdev);
3141         }
3142 }
3143
3144 static void hci_power_off(struct work_struct *work)
3145 {
3146         struct hci_dev *hdev = container_of(work, struct hci_dev,
3147                                             power_off.work);
3148
3149         BT_DBG("%s", hdev->name);
3150
3151         hci_dev_do_close(hdev);
3152 }
3153
3154 static void hci_discov_off(struct work_struct *work)
3155 {
3156         struct hci_dev *hdev;
3157
3158         hdev = container_of(work, struct hci_dev, discov_off.work);
3159
3160         BT_DBG("%s", hdev->name);
3161
3162         mgmt_discoverable_timeout(hdev);
3163 }
3164
3165 void hci_uuids_clear(struct hci_dev *hdev)
3166 {
3167         struct bt_uuid *uuid, *tmp;
3168
3169         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3170                 list_del(&uuid->list);
3171                 kfree(uuid);
3172         }
3173 }
3174
3175 void hci_link_keys_clear(struct hci_dev *hdev)
3176 {
3177         struct link_key *key;
3178
3179         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3180                 list_del_rcu(&key->list);
3181                 kfree_rcu(key, rcu);
3182         }
3183 }
3184
3185 void hci_smp_ltks_clear(struct hci_dev *hdev)
3186 {
3187         struct smp_ltk *k;
3188
3189         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3190                 list_del_rcu(&k->list);
3191                 kfree_rcu(k, rcu);
3192         }
3193 }
3194
3195 void hci_smp_irks_clear(struct hci_dev *hdev)
3196 {
3197         struct smp_irk *k;
3198
3199         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3200                 list_del_rcu(&k->list);
3201                 kfree_rcu(k, rcu);
3202         }
3203 }
3204
3205 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3206 {
3207         struct link_key *k;
3208
3209         rcu_read_lock();
3210         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3211                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3212                         rcu_read_unlock();
3213                         return k;
3214                 }
3215         }
3216         rcu_read_unlock();
3217
3218         return NULL;
3219 }
3220
3221 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3222                                u8 key_type, u8 old_key_type)
3223 {
3224         /* Legacy key */
3225         if (key_type < 0x03)
3226                 return true;
3227
3228         /* Debug keys are insecure so don't store them persistently */
3229         if (key_type == HCI_LK_DEBUG_COMBINATION)
3230                 return false;
3231
3232         /* Changed combination key and there's no previous one */
3233         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3234                 return false;
3235
3236         /* Security mode 3 case */
3237         if (!conn)
3238                 return true;
3239
3240         /* BR/EDR key derived using SC from an LE link */
3241         if (conn->type == LE_LINK)
3242                 return true;
3243
3244         /* Neither local nor remote side had no-bonding as a requirement */
3245         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3246                 return true;
3247
3248         /* Local side had dedicated bonding as a requirement */
3249         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3250                 return true;
3251
3252         /* Remote side had dedicated bonding as a requirement */
3253         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3254                 return true;
3255
3256         /* If none of the above criteria match, then don't store the key
3257          * persistently */
3258         return false;
3259 }
3260
3261 static u8 ltk_role(u8 type)
3262 {
3263         if (type == SMP_LTK)
3264                 return HCI_ROLE_MASTER;
3265
3266         return HCI_ROLE_SLAVE;
3267 }
3268
3269 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3270                              u8 addr_type, u8 role)
3271 {
3272         struct smp_ltk *k;
3273
3274         rcu_read_lock();
3275         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3276                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3277                         continue;
3278
3279                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
3280                         rcu_read_unlock();
3281                         return k;
3282                 }
3283         }
3284         rcu_read_unlock();
3285
3286         return NULL;
3287 }
3288
3289 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3290 {
3291         struct smp_irk *irk;
3292
3293         rcu_read_lock();
3294         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3295                 if (!bacmp(&irk->rpa, rpa)) {
3296                         rcu_read_unlock();
3297                         return irk;
3298                 }
3299         }
3300
3301         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3302                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3303                         bacpy(&irk->rpa, rpa);
3304                         rcu_read_unlock();
3305                         return irk;
3306                 }
3307         }
3308         rcu_read_unlock();
3309
3310         return NULL;
3311 }
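
/* Illustrative sketch (assumed caller, not part of this file): resolving an
 * incoming resolvable private address back to the identity it was generated
 * from; "rpa", "identity" and "addr_type" are hypothetical locals:
 *
 *	struct smp_irk *irk;
 *
 *	irk = hci_find_irk_by_rpa(hdev, &rpa);
 *	if (irk) {
 *		bacpy(&identity, &irk->bdaddr);
 *		addr_type = irk->addr_type;
 *	}
 */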
3312
3313 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3314                                      u8 addr_type)
3315 {
3316         struct smp_irk *irk;
3317
3318         /* Identity Address must be public or static random */
3319         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3320                 return NULL;
3321
3322         rcu_read_lock();
3323         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3324                 if (addr_type == irk->addr_type &&
3325                     bacmp(bdaddr, &irk->bdaddr) == 0) {
3326                         rcu_read_unlock();
3327                         return irk;
3328                 }
3329         }
3330         rcu_read_unlock();
3331
3332         return NULL;
3333 }
3334
3335 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3336                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3337                                   u8 pin_len, bool *persistent)
3338 {
3339         struct link_key *key, *old_key;
3340         u8 old_key_type;
3341
3342         old_key = hci_find_link_key(hdev, bdaddr);
3343         if (old_key) {
3344                 old_key_type = old_key->type;
3345                 key = old_key;
3346         } else {
3347                 old_key_type = conn ? conn->key_type : 0xff;
3348                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3349                 if (!key)
3350                         return NULL;
3351                 list_add_rcu(&key->list, &hdev->link_keys);
3352         }
3353
3354         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3355
3356         /* Some buggy controller combinations generate a changed
3357          * combination key for legacy pairing even when there's no
3358          * previous key */
3359         if (type == HCI_LK_CHANGED_COMBINATION &&
3360             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3361                 type = HCI_LK_COMBINATION;
3362                 if (conn)
3363                         conn->key_type = type;
3364         }
3365
3366         bacpy(&key->bdaddr, bdaddr);
3367         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3368         key->pin_len = pin_len;
3369
3370         if (type == HCI_LK_CHANGED_COMBINATION)
3371                 key->type = old_key_type;
3372         else
3373                 key->type = type;
3374
3375         if (persistent)
3376                 *persistent = hci_persistent_key(hdev, conn, type,
3377                                                  old_key_type);
3378
3379         return key;
3380 }
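
/* Illustrative sketch (assumed caller, roughly what the Link Key
 * Notification handler in hci_event.c does): the persistence verdict
 * computed by hci_persistent_key() comes back through the bool
 * out-parameter and is forwarded to userspace:
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &bdaddr, val, type,
 *	                       pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */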
3381
3382 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3383                             u8 addr_type, u8 type, u8 authenticated,
3384                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3385 {
3386         struct smp_ltk *key, *old_key;
3387         u8 role = ltk_role(type);
3388
3389         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
3390         if (old_key)
3391                 key = old_key;
3392         else {
3393                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3394                 if (!key)
3395                         return NULL;
3396                 list_add_rcu(&key->list, &hdev->long_term_keys);
3397         }
3398
3399         bacpy(&key->bdaddr, bdaddr);
3400         key->bdaddr_type = addr_type;
3401         memcpy(key->val, tk, sizeof(key->val));
3402         key->authenticated = authenticated;
3403         key->ediv = ediv;
3404         key->rand = rand;
3405         key->enc_size = enc_size;
3406         key->type = type;
3407
3408         return key;
3409 }
3410
3411 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3412                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3413 {
3414         struct smp_irk *irk;
3415
3416         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3417         if (!irk) {
3418                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3419                 if (!irk)
3420                         return NULL;
3421
3422                 bacpy(&irk->bdaddr, bdaddr);
3423                 irk->addr_type = addr_type;
3424
3425                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3426         }
3427
3428         memcpy(irk->val, val, 16);
3429         bacpy(&irk->rpa, rpa);
3430
3431         return irk;
3432 }
3433
3434 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3435 {
3436         struct link_key *key;
3437
3438         key = hci_find_link_key(hdev, bdaddr);
3439         if (!key)
3440                 return -ENOENT;
3441
3442         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3443
3444         list_del_rcu(&key->list);
3445         kfree_rcu(key, rcu);
3446
3447         return 0;
3448 }
3449
3450 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3451 {
3452         struct smp_ltk *k;
3453         int removed = 0;
3454
3455         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3456                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3457                         continue;
3458
3459                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3460
3461                 list_del_rcu(&k->list);
3462                 kfree_rcu(k, rcu);
3463                 removed++;
3464         }
3465
3466         return removed ? 0 : -ENOENT;
3467 }
3468
3469 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3470 {
3471         struct smp_irk *k;
3472
3473         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3474                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3475                         continue;
3476
3477                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3478
3479                 list_del_rcu(&k->list);
3480                 kfree_rcu(k, rcu);
3481         }
3482 }
3483
3484 /* HCI command timer function */
3485 static void hci_cmd_timeout(struct work_struct *work)
3486 {
3487         struct hci_dev *hdev = container_of(work, struct hci_dev,
3488                                             cmd_timer.work);
3489
3490         if (hdev->sent_cmd) {
3491                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3492                 u16 opcode = __le16_to_cpu(sent->opcode);
3493
3494                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3495         } else {
3496                 BT_ERR("%s command tx timeout", hdev->name);
3497         }
3498
3499         atomic_set(&hdev->cmd_cnt, 1);
3500         queue_work(hdev->workqueue, &hdev->cmd_work);
3501 }
3502
3503 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3504                                           bdaddr_t *bdaddr, u8 bdaddr_type)
3505 {
3506         struct oob_data *data;
3507
3508         list_for_each_entry(data, &hdev->remote_oob_data, list) {
3509                 if (bacmp(bdaddr, &data->bdaddr) != 0)
3510                         continue;
3511                 if (data->bdaddr_type != bdaddr_type)
3512                         continue;
3513                 return data;
3514         }
3515
3516         return NULL;
3517 }
3518
3519 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3520                                u8 bdaddr_type)
3521 {
3522         struct oob_data *data;
3523
3524         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3525         if (!data)
3526                 return -ENOENT;
3527
3528         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
3529
3530         list_del(&data->list);
3531         kfree(data);
3532
3533         return 0;
3534 }
3535
3536 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3537 {
3538         struct oob_data *data, *n;
3539
3540         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3541                 list_del(&data->list);
3542                 kfree(data);
3543         }
3544 }
3545
3546 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3547                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
3548                             u8 *hash256, u8 *rand256)
3549 {
3550         struct oob_data *data;
3551
3552         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
3553         if (!data) {
3554                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3555                 if (!data)
3556                         return -ENOMEM;
3557
3558                 bacpy(&data->bdaddr, bdaddr);
3559                 data->bdaddr_type = bdaddr_type;
3560                 list_add(&data->list, &hdev->remote_oob_data);
3561         }
3562
3563         if (hash192 && rand192) {
3564                 memcpy(data->hash192, hash192, sizeof(data->hash192));
3565                 memcpy(data->rand192, rand192, sizeof(data->rand192));
3566         } else {
3567                 memset(data->hash192, 0, sizeof(data->hash192));
3568                 memset(data->rand192, 0, sizeof(data->rand192));
3569         }
3570
3571         if (hash256 && rand256) {
3572                 memcpy(data->hash256, hash256, sizeof(data->hash256));
3573                 memcpy(data->rand256, rand256, sizeof(data->rand256));
3574         } else {
3575                 memset(data->hash256, 0, sizeof(data->hash256));
3576                 memset(data->rand256, 0, sizeof(data->rand256));
3577         }
3578
3579         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3580
3581         return 0;
3582 }
3583
3584 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3585                                          bdaddr_t *bdaddr, u8 type)
3586 {
3587         struct bdaddr_list *b;
3588
3589         list_for_each_entry(b, bdaddr_list, list) {
3590                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3591                         return b;
3592         }
3593
3594         return NULL;
3595 }
3596
3597 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3598 {
3599         struct list_head *p, *n;
3600
3601         list_for_each_safe(p, n, bdaddr_list) {
3602                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3603
3604                 list_del(p);
3605                 kfree(b);
3606         }
3607 }
3608
3609 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3610 {
3611         struct bdaddr_list *entry;
3612
3613         if (!bacmp(bdaddr, BDADDR_ANY))
3614                 return -EBADF;
3615
3616         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3617                 return -EEXIST;
3618
3619         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3620         if (!entry)
3621                 return -ENOMEM;
3622
3623         bacpy(&entry->bdaddr, bdaddr);
3624         entry->bdaddr_type = type;
3625
3626         list_add(&entry->list, list);
3627
3628         return 0;
3629 }
3630
3631 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3632 {
3633         struct bdaddr_list *entry;
3634
3635         if (!bacmp(bdaddr, BDADDR_ANY)) {
3636                 hci_bdaddr_list_clear(list);
3637                 return 0;
3638         }
3639
3640         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3641         if (!entry)
3642                 return -ENOENT;
3643
3644         list_del(&entry->list);
3645         kfree(entry);
3646
3647         return 0;
3648 }
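
/* Illustrative sketch: a typical add/remove round trip on one of the
 * bdaddr lists (the address is a hypothetical local):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		return err;
 *
 *	hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *
 * Note that passing BDADDR_ANY to hci_bdaddr_list_del() clears the
 * whole list.
 */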
3649
3650 /* This function requires the caller holds hdev->lock */
3651 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3652                                                bdaddr_t *addr, u8 addr_type)
3653 {
3654         struct hci_conn_params *params;
3655
3656         /* The conn params list only contains identity addresses */
3657         if (!hci_is_identity_address(addr, addr_type))
3658                 return NULL;
3659
3660         list_for_each_entry(params, &hdev->le_conn_params, list) {
3661                 if (bacmp(&params->addr, addr) == 0 &&
3662                     params->addr_type == addr_type) {
3663                         return params;
3664                 }
3665         }
3666
3667         return NULL;
3668 }
3669
3670 /* This function requires the caller holds hdev->lock */
3671 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3672                                                   bdaddr_t *addr, u8 addr_type)
3673 {
3674         struct hci_conn_params *param;
3675
3676         /* The list only contains identity addresses */
3677         if (!hci_is_identity_address(addr, addr_type))
3678                 return NULL;
3679
3680         list_for_each_entry(param, list, action) {
3681                 if (bacmp(&param->addr, addr) == 0 &&
3682                     param->addr_type == addr_type)
3683                         return param;
3684         }
3685
3686         return NULL;
3687 }
3688
3689 /* This function requires the caller holds hdev->lock */
3690 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3691                                             bdaddr_t *addr, u8 addr_type)
3692 {
3693         struct hci_conn_params *params;
3694
3695         if (!hci_is_identity_address(addr, addr_type))
3696                 return NULL;
3697
3698         params = hci_conn_params_lookup(hdev, addr, addr_type);
3699         if (params)
3700                 return params;
3701
3702         params = kzalloc(sizeof(*params), GFP_KERNEL);
3703         if (!params) {
3704                 BT_ERR("Out of memory");
3705                 return NULL;
3706         }
3707
3708         bacpy(&params->addr, addr);
3709         params->addr_type = addr_type;
3710
3711         list_add(&params->list, &hdev->le_conn_params);
3712         INIT_LIST_HEAD(&params->action);
3713
3714         params->conn_min_interval = hdev->le_conn_min_interval;
3715         params->conn_max_interval = hdev->le_conn_max_interval;
3716         params->conn_latency = hdev->le_conn_latency;
3717         params->supervision_timeout = hdev->le_supv_timeout;
3718         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3719
3720         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3721
3722         return params;
3723 }
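
/* Illustrative sketch (assumed caller, hdev->lock held): marking a bonded
 * device for automatic reconnection; adding the params to the
 * pend_le_conns action list roughly mirrors what the mgmt Add Device
 * path does:
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	list_add(&params->action, &hdev->pend_le_conns);
 *	hci_update_background_scan(hdev);
 */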
3724
3725 static void hci_conn_params_free(struct hci_conn_params *params)
3726 {
3727         if (params->conn) {
3728                 hci_conn_drop(params->conn);
3729                 hci_conn_put(params->conn);
3730         }
3731
3732         list_del(&params->action);
3733         list_del(&params->list);
3734         kfree(params);
3735 }
3736
3737 /* This function requires the caller holds hdev->lock */
3738 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3739 {
3740         struct hci_conn_params *params;
3741
3742         params = hci_conn_params_lookup(hdev, addr, addr_type);
3743         if (!params)
3744                 return;
3745
3746         hci_conn_params_free(params);
3747
3748         hci_update_background_scan(hdev);
3749
3750         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3751 }
3752
3753 /* This function requires the caller holds hdev->lock */
3754 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3755 {
3756         struct hci_conn_params *params, *tmp;
3757
3758         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3759                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3760                         continue;
3761                 list_del(&params->list);
3762                 kfree(params);
3763         }
3764
3765         BT_DBG("All LE disabled connection parameters were removed");
3766 }
3767
3768 /* This function requires the caller holds hdev->lock */
3769 void hci_conn_params_clear_all(struct hci_dev *hdev)
3770 {
3771         struct hci_conn_params *params, *tmp;
3772
3773         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3774                 hci_conn_params_free(params);
3775
3776         hci_update_background_scan(hdev);
3777
3778         BT_DBG("All LE connection parameters were removed");
3779 }
3780
3781 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3782 {
3783         if (status) {
3784                 BT_ERR("Failed to start inquiry: status %d", status);
3785
3786                 hci_dev_lock(hdev);
3787                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3788                 hci_dev_unlock(hdev);
3789                 return;
3790         }
3791 }
3792
3793 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3794 {
3795         /* General inquiry access code (GIAC) */
3796         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3797         struct hci_request req;
3798         struct hci_cp_inquiry cp;
3799         int err;
3800
3801         if (status) {
3802                 BT_ERR("Failed to disable LE scanning: status %d", status);
3803                 return;
3804         }
3805
3806         switch (hdev->discovery.type) {
3807         case DISCOV_TYPE_LE:
3808                 hci_dev_lock(hdev);
3809                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3810                 hci_dev_unlock(hdev);
3811                 break;
3812
3813         case DISCOV_TYPE_INTERLEAVED:
3814                 hci_req_init(&req, hdev);
3815
3816                 memset(&cp, 0, sizeof(cp));
3817                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3818                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3819                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3820
3821                 hci_dev_lock(hdev);
3822
3823                 hci_inquiry_cache_flush(hdev);
3824
3825                 err = hci_req_run(&req, inquiry_complete);
3826                 if (err) {
3827                         BT_ERR("Inquiry request failed: err %d", err);
3828                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3829                 }
3830
3831                 hci_dev_unlock(hdev);
3832                 break;
3833         }
3834 }
3835
3836 static void le_scan_disable_work(struct work_struct *work)
3837 {
3838         struct hci_dev *hdev = container_of(work, struct hci_dev,
3839                                             le_scan_disable.work);
3840         struct hci_request req;
3841         int err;
3842
3843         BT_DBG("%s", hdev->name);
3844
3845         hci_req_init(&req, hdev);
3846
3847         hci_req_add_le_scan_disable(&req);
3848
3849         err = hci_req_run(&req, le_scan_disable_work_complete);
3850         if (err)
3851                 BT_ERR("Disable LE scanning request failed: err %d", err);
3852 }
3853
3854 /* Copy the Identity Address of the controller.
3855  *
3856  * If the controller has a public BD_ADDR, then by default use that one.
3857  * If this is an LE-only controller without a public address, default to
3858  * the static random address.
3859  *
3860  * For debugging purposes it is possible to force controllers with a
3861  * public address to use the static random address instead.
3862  *
3863  * In case BR/EDR has been disabled on a dual-mode controller and
3864  * userspace has configured a static address, then that address
3865  * becomes the identity address instead of the public BR/EDR address.
3866  */
3867 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3868                                u8 *bdaddr_type)
3869 {
3870         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3871             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3872             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
3873              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3874                 bacpy(bdaddr, &hdev->static_addr);
3875                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3876         } else {
3877                 bacpy(bdaddr, &hdev->bdaddr);
3878                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3879         }
3880 }
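
/* Illustrative sketch: callers that need the controller's own address,
 * e.g. when building advertising or connection parameters, use:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */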
3881
3882 /* Alloc HCI device */
3883 struct hci_dev *hci_alloc_dev(void)
3884 {
3885         struct hci_dev *hdev;
3886
3887         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3888         if (!hdev)
3889                 return NULL;
3890
3891         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3892         hdev->esco_type = (ESCO_HV1);
3893         hdev->link_mode = (HCI_LM_ACCEPT);
3894         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3895         hdev->io_capability = 0x03;     /* No Input No Output */
3896         hdev->manufacturer = 0xffff;    /* Default to internal use */
3897         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3898         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3899
3900         hdev->sniff_max_interval = 800;
3901         hdev->sniff_min_interval = 80;
3902
3903         hdev->le_adv_channel_map = 0x07;
3904         hdev->le_adv_min_interval = 0x0800;
3905         hdev->le_adv_max_interval = 0x0800;
3906         hdev->le_scan_interval = 0x0060;
3907         hdev->le_scan_window = 0x0030;
3908         hdev->le_conn_min_interval = 0x0028;
3909         hdev->le_conn_max_interval = 0x0038;
3910         hdev->le_conn_latency = 0x0000;
3911         hdev->le_supv_timeout = 0x002a;
3912
3913         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3914         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3915         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3916         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3917
3918         mutex_init(&hdev->lock);
3919         mutex_init(&hdev->req_lock);
3920
3921         INIT_LIST_HEAD(&hdev->mgmt_pending);
3922         INIT_LIST_HEAD(&hdev->blacklist);
3923         INIT_LIST_HEAD(&hdev->whitelist);
3924         INIT_LIST_HEAD(&hdev->uuids);
3925         INIT_LIST_HEAD(&hdev->link_keys);
3926         INIT_LIST_HEAD(&hdev->long_term_keys);
3927         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3928         INIT_LIST_HEAD(&hdev->remote_oob_data);
3929         INIT_LIST_HEAD(&hdev->le_white_list);
3930         INIT_LIST_HEAD(&hdev->le_conn_params);
3931         INIT_LIST_HEAD(&hdev->pend_le_conns);
3932         INIT_LIST_HEAD(&hdev->pend_le_reports);
3933         INIT_LIST_HEAD(&hdev->conn_hash.list);
3934
3935         INIT_WORK(&hdev->rx_work, hci_rx_work);
3936         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3937         INIT_WORK(&hdev->tx_work, hci_tx_work);
3938         INIT_WORK(&hdev->power_on, hci_power_on);
3939
3940         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3941         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3942         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3943
3944         skb_queue_head_init(&hdev->rx_q);
3945         skb_queue_head_init(&hdev->cmd_q);
3946         skb_queue_head_init(&hdev->raw_q);
3947
3948         init_waitqueue_head(&hdev->req_wait_q);
3949
3950         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3951
3952         hci_init_sysfs(hdev);
3953         discovery_init(hdev);
3954
3955         return hdev;
3956 }
3957 EXPORT_SYMBOL(hci_alloc_dev);
3958
3959 /* Free HCI device */
3960 void hci_free_dev(struct hci_dev *hdev)
3961 {
3962         /* will be freed via the device release callback */
3963         put_device(&hdev->dev);
3964 }
3965 EXPORT_SYMBOL(hci_free_dev);
3966
3967 /* Register HCI device */
3968 int hci_register_dev(struct hci_dev *hdev)
3969 {
3970         int id, error;
3971
3972         if (!hdev->open || !hdev->close || !hdev->send)
3973                 return -EINVAL;
3974
3975         /* Do not allow HCI_AMP devices to register at index 0,
3976          * so the index can be used as the AMP controller ID.
3977          */
3978         switch (hdev->dev_type) {
3979         case HCI_BREDR:
3980                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3981                 break;
3982         case HCI_AMP:
3983                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3984                 break;
3985         default:
3986                 return -EINVAL;
3987         }
3988
3989         if (id < 0)
3990                 return id;
3991
3992         sprintf(hdev->name, "hci%d", id);
3993         hdev->id = id;
3994
3995         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3996
3997         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3998                                           WQ_MEM_RECLAIM, 1, hdev->name);
3999         if (!hdev->workqueue) {
4000                 error = -ENOMEM;
4001                 goto err;
4002         }
4003
4004         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4005                                               WQ_MEM_RECLAIM, 1, hdev->name);
4006         if (!hdev->req_workqueue) {
4007                 destroy_workqueue(hdev->workqueue);
4008                 error = -ENOMEM;
4009                 goto err;
4010         }
4011
4012         if (!IS_ERR_OR_NULL(bt_debugfs))
4013                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4014
4015         dev_set_name(&hdev->dev, "%s", hdev->name);
4016
4017         error = device_add(&hdev->dev);
4018         if (error < 0)
4019                 goto err_wqueue;
4020
4021         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4022                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4023                                     hdev);
4024         if (hdev->rfkill) {
4025                 if (rfkill_register(hdev->rfkill) < 0) {
4026                         rfkill_destroy(hdev->rfkill);
4027                         hdev->rfkill = NULL;
4028                 }
4029         }
4030
4031         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4032                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4033
4034         set_bit(HCI_SETUP, &hdev->dev_flags);
4035         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4036
4037         if (hdev->dev_type == HCI_BREDR) {
4038                 /* Assume BR/EDR support until proven otherwise (such as
4039                  * through reading supported features during init).
4040                  */
4041                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4042         }
4043
4044         write_lock(&hci_dev_list_lock);
4045         list_add(&hdev->list, &hci_dev_list);
4046         write_unlock(&hci_dev_list_lock);
4047
4048         /* Devices that are marked for raw-only usage are unconfigured
4049          * and should not be included in normal operation.
4050          */
4051         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4052                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4053
4054         hci_notify(hdev, HCI_DEV_REG);
4055         hci_dev_hold(hdev);
4056
4057         queue_work(hdev->req_workqueue, &hdev->power_on);
4058
4059         return id;
4060
4061 err_wqueue:
4062         destroy_workqueue(hdev->workqueue);
4063         destroy_workqueue(hdev->req_workqueue);
4064 err:
4065         ida_simple_remove(&hci_index_ida, hdev->id);
4066
4067         return error;
4068 }
4069 EXPORT_SYMBOL(hci_register_dev);
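
/* Illustrative sketch of the expected driver-side registration sequence
 * (my_open/my_close/my_send and my_priv are hypothetical driver symbols):
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */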
4070
4071 /* Unregister HCI device */
4072 void hci_unregister_dev(struct hci_dev *hdev)
4073 {
4074         int i, id;
4075
4076         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4077
4078         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4079
4080         id = hdev->id;
4081
4082         write_lock(&hci_dev_list_lock);
4083         list_del(&hdev->list);
4084         write_unlock(&hci_dev_list_lock);
4085
4086         hci_dev_do_close(hdev);
4087
4088         for (i = 0; i < NUM_REASSEMBLY; i++)
4089                 kfree_skb(hdev->reassembly[i]);
4090
4091         cancel_work_sync(&hdev->power_on);
4092
4093         if (!test_bit(HCI_INIT, &hdev->flags) &&
4094             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4095             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4096                 hci_dev_lock(hdev);
4097                 mgmt_index_removed(hdev);
4098                 hci_dev_unlock(hdev);
4099         }
4100
4101         /* mgmt_index_removed should take care of emptying the
4102          * pending list */
4103         BUG_ON(!list_empty(&hdev->mgmt_pending));
4104
4105         hci_notify(hdev, HCI_DEV_UNREG);
4106
4107         if (hdev->rfkill) {
4108                 rfkill_unregister(hdev->rfkill);
4109                 rfkill_destroy(hdev->rfkill);
4110         }
4111
4112         smp_unregister(hdev);
4113
4114         device_del(&hdev->dev);
4115
4116         debugfs_remove_recursive(hdev->debugfs);
4117
4118         destroy_workqueue(hdev->workqueue);
4119         destroy_workqueue(hdev->req_workqueue);
4120
4121         hci_dev_lock(hdev);
4122         hci_bdaddr_list_clear(&hdev->blacklist);
4123         hci_bdaddr_list_clear(&hdev->whitelist);
4124         hci_uuids_clear(hdev);
4125         hci_link_keys_clear(hdev);
4126         hci_smp_ltks_clear(hdev);
4127         hci_smp_irks_clear(hdev);
4128         hci_remote_oob_data_clear(hdev);
4129         hci_bdaddr_list_clear(&hdev->le_white_list);
4130         hci_conn_params_clear_all(hdev);
4131         hci_discovery_filter_clear(hdev);
4132         hci_dev_unlock(hdev);
4133
4134         hci_dev_put(hdev);
4135
4136         ida_simple_remove(&hci_index_ida, id);
4137 }
4138 EXPORT_SYMBOL(hci_unregister_dev);
4139
4140 /* Suspend HCI device */
4141 int hci_suspend_dev(struct hci_dev *hdev)
4142 {
4143         hci_notify(hdev, HCI_DEV_SUSPEND);
4144         return 0;
4145 }
4146 EXPORT_SYMBOL(hci_suspend_dev);
4147
4148 /* Resume HCI device */
4149 int hci_resume_dev(struct hci_dev *hdev)
4150 {
4151         hci_notify(hdev, HCI_DEV_RESUME);
4152         return 0;
4153 }
4154 EXPORT_SYMBOL(hci_resume_dev);
4155
4156 /* Reset HCI device */
4157 int hci_reset_dev(struct hci_dev *hdev)
4158 {
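        /* A synthetic Hardware Error event: event code 0x10
         * (HCI_EV_HARDWARE_ERROR), parameter total length 1,
         * hardware code 0x00.
         */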
4159         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4160         struct sk_buff *skb;
4161
4162         skb = bt_skb_alloc(3, GFP_ATOMIC);
4163         if (!skb)
4164                 return -ENOMEM;
4165
4166         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4167         memcpy(skb_put(skb, 3), hw_err, 3);
4168
4169         /* Send Hardware Error to upper stack */
4170         return hci_recv_frame(hdev, skb);
4171 }
4172 EXPORT_SYMBOL(hci_reset_dev);
4173
4174 /* Receive frame from HCI drivers */
4175 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4176 {
4177         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4178                       !test_bit(HCI_INIT, &hdev->flags))) {
4179                 kfree_skb(skb);
4180                 return -ENXIO;
4181         }
4182
4183         /* Incoming skb */
4184         bt_cb(skb)->incoming = 1;
4185
4186         /* Time stamp */
4187         __net_timestamp(skb);
4188
4189         skb_queue_tail(&hdev->rx_q, skb);
4190         queue_work(hdev->workqueue, &hdev->rx_work);
4191
4192         return 0;
4193 }
4194 EXPORT_SYMBOL(hci_recv_frame);
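
/* Illustrative sketch (assumed driver receive path): a transport driver
 * that already has a complete packet tags it and hands it over; on error
 * the skb has been consumed by hci_recv_frame():
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */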
4195
4196 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4197                           int count, __u8 index)
4198 {
4199         int len = 0;
4200         int hlen = 0;
4201         int remain = count;
4202         struct sk_buff *skb;
4203         struct bt_skb_cb *scb;
4204
4205         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4206             index >= NUM_REASSEMBLY)
4207                 return -EILSEQ;
4208
4209         skb = hdev->reassembly[index];
4210
4211         if (!skb) {
4212                 switch (type) {
4213                 case HCI_ACLDATA_PKT:
4214                         len = HCI_MAX_FRAME_SIZE;
4215                         hlen = HCI_ACL_HDR_SIZE;
4216                         break;
4217                 case HCI_EVENT_PKT:
4218                         len = HCI_MAX_EVENT_SIZE;
4219                         hlen = HCI_EVENT_HDR_SIZE;
4220                         break;
4221                 case HCI_SCODATA_PKT:
4222                         len = HCI_MAX_SCO_SIZE;
4223                         hlen = HCI_SCO_HDR_SIZE;
4224                         break;
4225                 }
4226
4227                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4228                 if (!skb)
4229                         return -ENOMEM;
4230
4231                 scb = (void *) skb->cb;
4232                 scb->expect = hlen;
4233                 scb->pkt_type = type;
4234
4235                 hdev->reassembly[index] = skb;
4236         }
4237
4238         while (count) {
4239                 scb = (void *) skb->cb;
4240                 len = min_t(uint, scb->expect, count);
4241
4242                 memcpy(skb_put(skb, len), data, len);
4243
4244                 count -= len;
4245                 data += len;
4246                 scb->expect -= len;
4247                 remain = count;
4248
4249                 switch (type) {
4250                 case HCI_EVENT_PKT:
4251                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4252                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4253                                 scb->expect = h->plen;
4254
4255                                 if (skb_tailroom(skb) < scb->expect) {
4256                                         kfree_skb(skb);
4257                                         hdev->reassembly[index] = NULL;
4258                                         return -ENOMEM;
4259                                 }
4260                         }
4261                         break;
4262
4263                 case HCI_ACLDATA_PKT:
4264                 if (skb->len == HCI_ACL_HDR_SIZE) {
4265                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4266                                 scb->expect = __le16_to_cpu(h->dlen);
4267
4268                                 if (skb_tailroom(skb) < scb->expect) {
4269                                         kfree_skb(skb);
4270                                         hdev->reassembly[index] = NULL;
4271                                         return -ENOMEM;
4272                                 }
4273                         }
4274                         break;
4275
4276                 case HCI_SCODATA_PKT:
4277                         if (skb->len == HCI_SCO_HDR_SIZE) {
4278                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4279                                 scb->expect = h->dlen;
4280
4281                                 if (skb_tailroom(skb) < scb->expect) {
4282                                         kfree_skb(skb);
4283                                         hdev->reassembly[index] = NULL;
4284                                         return -ENOMEM;
4285                                 }
4286                         }
4287                         break;
4288                 }
4289
4290                 if (scb->expect == 0) {
4291                         /* Complete frame */
4292
4293                         bt_cb(skb)->pkt_type = type;
4294                         hci_recv_frame(hdev, skb);
4295
4296                         hdev->reassembly[index] = NULL;
4297                         return remain;
4298                 }
4299         }
4300
4301         return remain;
4302 }
4303
4304 #define STREAM_REASSEMBLY 0
4305
4306 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4307 {
4308         int type;
4309         int rem = 0;
4310
4311         while (count) {
4312                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4313
4314                 if (!skb) {
4315                         struct { char type; } *pkt;
4316
4317                         /* Start of the frame */
4318                         pkt = data;
4319                         type = pkt->type;
4320
4321                         data++;
4322                         count--;
4323                 } else
4324                         type = bt_cb(skb)->pkt_type;
4325
4326                 rem = hci_reassembly(hdev, type, data, count,
4327                                      STREAM_REASSEMBLY);
4328                 if (rem < 0)
4329                         return rem;
4330
4331                 data += (count - rem);
4332                 count = rem;
4333         }
4334
4335         return rem;
4336 }
4337 EXPORT_SYMBOL(hci_recv_stream_fragment);
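
/* Illustrative sketch (assumed UART-style driver): byte-stream transports
 * deliver data in arbitrary chunks, with the packet type indicator as the
 * first byte of each frame; the core reassembles across calls:
 *
 *	ret = hci_recv_stream_fragment(hdev, buf, len);
 *	if (ret < 0)
 *		BT_ERR("Frame reassembly failed (%d)", ret);
 */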
4338
4339 /* ---- Interface to upper protocols ---- */
4340
4341 int hci_register_cb(struct hci_cb *cb)
4342 {
4343         BT_DBG("%p name %s", cb, cb->name);
4344
4345         write_lock(&hci_cb_list_lock);
4346         list_add(&cb->list, &hci_cb_list);
4347         write_unlock(&hci_cb_list_lock);
4348
4349         return 0;
4350 }
4351 EXPORT_SYMBOL(hci_register_cb);
4352
4353 int hci_unregister_cb(struct hci_cb *cb)
4354 {
4355         BT_DBG("%p name %s", cb, cb->name);
4356
4357         write_lock(&hci_cb_list_lock);
4358         list_del(&cb->list);
4359         write_unlock(&hci_cb_list_lock);
4360
4361         return 0;
4362 }
4363 EXPORT_SYMBOL(hci_unregister_cb);
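
/* Illustrative sketch: an upper protocol registers a callback block once
 * at init time (my_security_cfm is a hypothetical handler):
 *
 *	static struct hci_cb my_cb = {
 *		.name         = "my_proto",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 */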
4364
4365 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4366 {
4367         int err;
4368
4369         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4370
4371         /* Time stamp */
4372         __net_timestamp(skb);
4373
4374         /* Send copy to monitor */
4375         hci_send_to_monitor(hdev, skb);
4376
4377         if (atomic_read(&hdev->promisc)) {
4378                 /* Send copy to the sockets */
4379                 hci_send_to_sock(hdev, skb);
4380         }
4381
4382         /* Get rid of skb owner, prior to sending to the driver. */
4383         skb_orphan(skb);
4384
4385         err = hdev->send(hdev, skb);
4386         if (err < 0) {
4387                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4388                 kfree_skb(skb);
4389         }
4390 }
4391
4392 bool hci_req_pending(struct hci_dev *hdev)
4393 {
4394         return (hdev->req_status == HCI_REQ_PEND);
4395 }
4396
4397 /* Send HCI command */
4398 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4399                  const void *param)
4400 {
4401         struct sk_buff *skb;
4402
4403         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4404
4405         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4406         if (!skb) {
4407                 BT_ERR("%s no memory for command", hdev->name);
4408                 return -ENOMEM;
4409         }
4410
4411         /* Stand-alone HCI commands must be flagged as
4412          * single-command requests.
4413          */
4414         bt_cb(skb)->req.start = true;
4415
4416         skb_queue_tail(&hdev->cmd_q, skb);
4417         queue_work(hdev->workqueue, &hdev->cmd_work);
4418
4419         return 0;
4420 }
4421
4422 /* Get data from the previously sent command */
4423 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4424 {
4425         struct hci_command_hdr *hdr;
4426
4427         if (!hdev->sent_cmd)
4428                 return NULL;
4429
4430         hdr = (void *) hdev->sent_cmd->data;
4431
4432         if (hdr->opcode != cpu_to_le16(opcode))
4433                 return NULL;
4434
4435         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4436
4437         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4438 }
4439
4440 /* Send ACL data */
4441 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4442 {
4443         struct hci_acl_hdr *hdr;
4444         int len = skb->len;
4445
4446         skb_push(skb, HCI_ACL_HDR_SIZE);
4447         skb_reset_transport_header(skb);
4448         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4449         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4450         hdr->dlen   = cpu_to_le16(len);
4451 }
4452
4453 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4454                           struct sk_buff *skb, __u16 flags)
4455 {
4456         struct hci_conn *conn = chan->conn;
4457         struct hci_dev *hdev = conn->hdev;
4458         struct sk_buff *list;
4459
4460         skb->len = skb_headlen(skb);
4461         skb->data_len = 0;
4462
4463         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4464
4465         switch (hdev->dev_type) {
4466         case HCI_BREDR:
4467                 hci_add_acl_hdr(skb, conn->handle, flags);
4468                 break;
4469         case HCI_AMP:
4470                 hci_add_acl_hdr(skb, chan->handle, flags);
4471                 break;
4472         default:
4473                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4474                 return;
4475         }
4476
4477         list = skb_shinfo(skb)->frag_list;
4478         if (!list) {
4479                 /* Non-fragmented */
4480                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4481
4482                 skb_queue_tail(queue, skb);
4483         } else {
4484                 /* Fragmented */
4485                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4486
4487                 skb_shinfo(skb)->frag_list = NULL;
4488
4489                 /* Queue all fragments atomically. We need to use spin_lock_bh
4490                  * here because of 6LoWPAN links, as there this function is
4491                  * called from softirq and using normal spin lock could cause
4492                  * deadlocks.
4493                  */
4494                 spin_lock_bh(&queue->lock);
4495
4496                 __skb_queue_tail(queue, skb);
4497
4498                 flags &= ~ACL_START;
4499                 flags |= ACL_CONT;
4500                 do {
4501                         skb = list; list = list->next;
4502
4503                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4504                         hci_add_acl_hdr(skb, conn->handle, flags);
4505
4506                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4507
4508                         __skb_queue_tail(queue, skb);
4509                 } while (list);
4510
4511                 spin_unlock_bh(&queue->lock);
4512         }
4513 }
4514
4515 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4516 {
4517         struct hci_dev *hdev = chan->conn->hdev;
4518
4519         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4520
4521         hci_queue_acl(chan, &chan->data_q, skb, flags);
4522
4523         queue_work(hdev->workqueue, &hdev->tx_work);
4524 }
4525
4526 /* Send SCO data */
4527 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4528 {
4529         struct hci_dev *hdev = conn->hdev;
4530         struct hci_sco_hdr hdr;
4531
4532         BT_DBG("%s len %d", hdev->name, skb->len);
4533
4534         hdr.handle = cpu_to_le16(conn->handle);
4535         hdr.dlen   = skb->len;
4536
4537         skb_push(skb, HCI_SCO_HDR_SIZE);
4538         skb_reset_transport_header(skb);
4539         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4540
4541         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4542
4543         skb_queue_tail(&conn->data_q, skb);
4544         queue_work(hdev->workqueue, &hdev->tx_work);
4545 }
4546
4547 /* ---- HCI TX task (outgoing data) ---- */
4548
4549 /* HCI Connection scheduler */
4550 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4551                                      int *quote)
4552 {
4553         struct hci_conn_hash *h = &hdev->conn_hash;
4554         struct hci_conn *conn = NULL, *c;
4555         unsigned int num = 0, min = ~0;
4556
4557         /* We don't have to lock the device here. Connections are always
4558          * added and removed with the TX task disabled. */
4559
4560         rcu_read_lock();
4561
4562         list_for_each_entry_rcu(c, &h->list, list) {
4563                 if (c->type != type || skb_queue_empty(&c->data_q))
4564                         continue;
4565
4566                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4567                         continue;
4568
4569                 num++;
4570
4571                 if (c->sent < min) {
4572                         min  = c->sent;
4573                         conn = c;
4574                 }
4575
4576                 if (hci_conn_num(hdev, type) == num)
4577                         break;
4578         }
4579
4580         rcu_read_unlock();
4581
4582         if (conn) {
4583                 int cnt, q;
4584
4585                 switch (conn->type) {
4586                 case ACL_LINK:
4587                         cnt = hdev->acl_cnt;
4588                         break;
4589                 case SCO_LINK:
4590                 case ESCO_LINK:
4591                         cnt = hdev->sco_cnt;
4592                         break;
4593                 case LE_LINK:
4594                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4595                         break;
4596                 default:
4597                         cnt = 0;
4598                         BT_ERR("Unknown link type");
4599                 }
4600
4601                 q = cnt / num;
4602                 *quote = q ? q : 1;
4603         } else
4604                 *quote = 0;
4605
4606         BT_DBG("conn %p quote %d", conn, *quote);
4607         return conn;
4608 }
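
/* Worked example for the quota computed above: with hdev->acl_cnt == 8
 * free packet slots and num == 3 ACL connections with queued data, the
 * picked connection (lowest 'sent' count) gets q = 8 / 3 = 2 packets;
 * a zero quotient is rounded up to 1 so progress is always possible.
 */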
4609
4610 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4611 {
4612         struct hci_conn_hash *h = &hdev->conn_hash;
4613         struct hci_conn *c;
4614
4615         BT_ERR("%s link tx timeout", hdev->name);
4616
4617         rcu_read_lock();
4618
4619         /* Kill stalled connections */
4620         list_for_each_entry_rcu(c, &h->list, list) {
4621                 if (c->type == type && c->sent) {
4622                         BT_ERR("%s killing stalled connection %pMR",
4623                                hdev->name, &c->dst);
4624                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4625                 }
4626         }
4627
4628         rcu_read_unlock();
4629 }
4630
4631 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4632                                       int *quote)
4633 {
4634         struct hci_conn_hash *h = &hdev->conn_hash;
4635         struct hci_chan *chan = NULL;
4636         unsigned int num = 0, min = ~0, cur_prio = 0;
4637         struct hci_conn *conn;
4638         int cnt, q, conn_num = 0;
4639
4640         BT_DBG("%s", hdev->name);
4641
4642         rcu_read_lock();
4643
4644         list_for_each_entry_rcu(conn, &h->list, list) {
4645                 struct hci_chan *tmp;
4646
4647                 if (conn->type != type)
4648                         continue;
4649
4650                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4651                         continue;
4652
4653                 conn_num++;
4654
4655                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4656                         struct sk_buff *skb;
4657
4658                         if (skb_queue_empty(&tmp->data_q))
4659                                 continue;
4660
4661                         skb = skb_peek(&tmp->data_q);
4662                         if (skb->priority < cur_prio)
4663                                 continue;
4664
4665                         if (skb->priority > cur_prio) {
4666                                 num = 0;
4667                                 min = ~0;
4668                                 cur_prio = skb->priority;
4669                         }
4670
4671                         num++;
4672
4673                         if (conn->sent < min) {
4674                                 min  = conn->sent;
4675                                 chan = tmp;
4676                         }
4677                 }
4678
4679                 if (hci_conn_num(hdev, type) == conn_num)
4680                         break;
4681         }
4682
4683         rcu_read_unlock();
4684
4685         if (!chan)
4686                 return NULL;
4687
4688         switch (chan->conn->type) {
4689         case ACL_LINK:
4690                 cnt = hdev->acl_cnt;
4691                 break;
4692         case AMP_LINK:
4693                 cnt = hdev->block_cnt;
4694                 break;
4695         case SCO_LINK:
4696         case ESCO_LINK:
4697                 cnt = hdev->sco_cnt;
4698                 break;
4699         case LE_LINK:
4700                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4701                 break;
4702         default:
4703                 cnt = 0;
4704                 BT_ERR("Unknown link type");
4705         }
4706
4707         q = cnt / num;
4708         *quote = q ? q : 1;
4709         BT_DBG("chan %p quote %d", chan, *quote);
4710         return chan;
4711 }
4712
4713 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4714 {
4715         struct hci_conn_hash *h = &hdev->conn_hash;
4716         struct hci_conn *conn;
4717         int num = 0;
4718
4719         BT_DBG("%s", hdev->name);
4720
4721         rcu_read_lock();
4722
4723         list_for_each_entry_rcu(conn, &h->list, list) {
4724                 struct hci_chan *chan;
4725
4726                 if (conn->type != type)
4727                         continue;
4728
4729                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4730                         continue;
4731
4732                 num++;
4733
4734                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4735                         struct sk_buff *skb;
4736
4737                         if (chan->sent) {
4738                                 chan->sent = 0;
4739                                 continue;
4740                         }
4741
4742                         if (skb_queue_empty(&chan->data_q))
4743                                 continue;
4744
4745                         skb = skb_peek(&chan->data_q);
4746                         if (skb->priority >= HCI_PRIO_MAX - 1)
4747                                 continue;
4748
4749                         skb->priority = HCI_PRIO_MAX - 1;
4750
4751                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4752                                skb->priority);
4753                 }
4754
4755                 if (hci_conn_num(hdev, type) == num)
4756                         break;
4757         }
4758
4759         rcu_read_unlock();
4760
4761 }
4762
4763 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4764 {
4765         /* Calculate count of blocks used by this packet */
4766         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4767 }
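
/* Worked example: with block_len == 64, a 339-byte ACL frame occupies
 * DIV_ROUND_UP(339 - 4, 64) = 6 controller data blocks (the 4-byte ACL
 * header is not counted against the block pool).
 */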
4768
4769 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4770 {
4771         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4772                 /* ACL tx timeout must be longer than maximum
4773                  * link supervision timeout (40.9 seconds) */
4774                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4775                                        HCI_ACL_TX_TIMEOUT))
4776                         hci_link_tx_to(hdev, ACL_LINK);
4777         }
4778 }
4779
4780 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4781 {
4782         unsigned int cnt = hdev->acl_cnt;
4783         struct hci_chan *chan;
4784         struct sk_buff *skb;
4785         int quote;
4786
4787         __check_timeout(hdev, cnt);
4788
4789         while (hdev->acl_cnt &&
4790                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4791                 u32 priority = (skb_peek(&chan->data_q))->priority;
4792                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4793                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4794                                skb->len, skb->priority);
4795
4796                         /* Stop if priority has changed */
4797                         if (skb->priority < priority)
4798                                 break;
4799
4800                         skb = skb_dequeue(&chan->data_q);
4801
4802                         hci_conn_enter_active_mode(chan->conn,
4803                                                    bt_cb(skb)->force_active);
4804
4805                         hci_send_frame(hdev, skb);
4806                         hdev->acl_last_tx = jiffies;
4807
4808                         hdev->acl_cnt--;
4809                         chan->sent++;
4810                         chan->conn->sent++;
4811                 }
4812         }
4813
4814         if (cnt != hdev->acl_cnt)
4815                 hci_prio_recalculate(hdev, ACL_LINK);
4816 }
4817
4818 static void hci_sched_acl_blk(struct hci_dev *hdev)
4819 {
4820         unsigned int cnt = hdev->block_cnt;
4821         struct hci_chan *chan;
4822         struct sk_buff *skb;
4823         int quote;
4824         u8 type;
4825
4826         __check_timeout(hdev, cnt);
4827
4828         BT_DBG("%s", hdev->name);
4829
4830         if (hdev->dev_type == HCI_AMP)
4831                 type = AMP_LINK;
4832         else
4833                 type = ACL_LINK;
4834
4835         while (hdev->block_cnt > 0 &&
4836                (chan = hci_chan_sent(hdev, type, &quote))) {
4837                 u32 priority = (skb_peek(&chan->data_q))->priority;
4838                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4839                         int blocks;
4840
4841                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4842                                skb->len, skb->priority);
4843
4844                         /* Stop if priority has changed */
4845                         if (skb->priority < priority)
4846                                 break;
4847
4848                         blocks = __get_blocks(hdev, skb);
4849                         if (blocks > hdev->block_cnt)
4850                                 return;
4851
4852                         skb = skb_dequeue(&chan->data_q);
4853
4854                         hci_conn_enter_active_mode(chan->conn,
4855                                                    bt_cb(skb)->force_active);
4856
4857                         hci_send_frame(hdev, skb);
4858                         hdev->acl_last_tx = jiffies;
4859
4860                         hdev->block_cnt -= blocks;
4861                         quote -= blocks;
4862
4863                         chan->sent += blocks;
4864                         chan->conn->sent += blocks;
4865                 }
4866         }
4867
4868         if (cnt != hdev->block_cnt)
4869                 hci_prio_recalculate(hdev, type);
4870 }
4871
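     /* Dispatch ACL scheduling according to the controller's flow
      * control mode.
      */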
4872 static void hci_sched_acl(struct hci_dev *hdev)
4873 {
4874         BT_DBG("%s", hdev->name);
4875
4876         /* Nothing to schedule if a BR/EDR controller has no ACL links */
4877         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4878                 return;
4879
4880         /* Nothing to schedule if an AMP controller has no AMP links */
4881         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4882                 return;
4883
4884         switch (hdev->flow_ctl_mode) {
4885         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4886                 hci_sched_acl_pkt(hdev);
4887                 break;
4888
4889         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4890                 hci_sched_acl_blk(hdev);
4891                 break;
4892         }
4893 }
4894
4895 /* Schedule SCO */
4896 static void hci_sched_sco(struct hci_dev *hdev)
4897 {
4898         struct hci_conn *conn;
4899         struct sk_buff *skb;
4900         int quote;
4901
4902         BT_DBG("%s", hdev->name);
4903
4904         if (!hci_conn_num(hdev, SCO_LINK))
4905                 return;
4906
4907         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4908                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4909                         BT_DBG("skb %p len %d", skb, skb->len);
4910                         hci_send_frame(hdev, skb);
4911
4912                         conn->sent++;
4913                         if (conn->sent == ~0)
4914                                 conn->sent = 0;
4915                 }
4916         }
4917 }
4918
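     /* Schedule eSCO */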
4919 static void hci_sched_esco(struct hci_dev *hdev)
4920 {
4921         struct hci_conn *conn;
4922         struct sk_buff *skb;
4923         int quote;
4924
4925         BT_DBG("%s", hdev->name);
4926
4927         if (!hci_conn_num(hdev, ESCO_LINK))
4928                 return;
4929
4930         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4931                                                      &quote))) {
4932                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4933                         BT_DBG("skb %p len %d", skb, skb->len);
4934                         hci_send_frame(hdev, skb);
4935
4936                         conn->sent++;
4937                         if (conn->sent == ~0)
4938                                 conn->sent = 0;
4939                 }
4940         }
4941 }
4942
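     /* Schedule LE: controllers without a dedicated LE buffer pool
      * (le_pkts == 0) share the ACL credits instead.
      */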
4943 static void hci_sched_le(struct hci_dev *hdev)
4944 {
4945         struct hci_chan *chan;
4946         struct sk_buff *skb;
4947         int quote, cnt, tmp;
4948
4949         BT_DBG("%s", hdev->name);
4950
4951         if (!hci_conn_num(hdev, LE_LINK))
4952                 return;
4953
4954         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4955                 /* LE tx timeout must be longer than maximum
4956                  * link supervision timeout (40.9 seconds) */
4957                 if (!hdev->le_cnt && hdev->le_pkts &&
4958                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4959                         hci_link_tx_to(hdev, LE_LINK);
4960         }
4961
4962         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4963         tmp = cnt;
4964         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4965                 u32 priority = (skb_peek(&chan->data_q))->priority;
4966                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4967                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4968                                skb->len, skb->priority);
4969
4970                         /* Stop if priority has changed */
4971                         if (skb->priority < priority)
4972                                 break;
4973
4974                         skb = skb_dequeue(&chan->data_q);
4975
4976                         hci_send_frame(hdev, skb);
4977                         hdev->le_last_tx = jiffies;
4978
4979                         cnt--;
4980                         chan->sent++;
4981                         chan->conn->sent++;
4982                 }
4983         }
4984
4985         if (hdev->le_pkts)
4986                 hdev->le_cnt = cnt;
4987         else
4988                 hdev->acl_cnt = cnt;
4989
4990         if (cnt != tmp)
4991                 hci_prio_recalculate(hdev, LE_LINK);
4992 }
4993
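     /* TX work: run the per-link schedulers (unless the device is in use
      * by a user channel) and then flush the raw queue.
      */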
4994 static void hci_tx_work(struct work_struct *work)
4995 {
4996         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4997         struct sk_buff *skb;
4998
4999         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5000                hdev->sco_cnt, hdev->le_cnt);
5001
5002         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5003                 /* Schedule queues and send frames to the HCI driver */
5004                 hci_sched_acl(hdev);
5005                 hci_sched_sco(hdev);
5006                 hci_sched_esco(hdev);
5007                 hci_sched_le(hdev);
5008         }
5009
5010         /* Send all queued raw (unknown type) packets */
5011         while ((skb = skb_dequeue(&hdev->raw_q)))
5012                 hci_send_frame(hdev, skb);
5013 }
5014
5015 /* ----- HCI RX task (incoming data processing) ----- */
5016
5017 /* ACL data packet */
5018 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5019 {
5020         struct hci_acl_hdr *hdr = (void *) skb->data;
5021         struct hci_conn *conn;
5022         __u16 handle, flags;
5023
5024         skb_pull(skb, HCI_ACL_HDR_SIZE);
5025
5026         handle = __le16_to_cpu(hdr->handle);
5027         flags  = hci_flags(handle);
5028         handle = hci_handle(handle);
5029
5030         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5031                handle, flags);
5032
5033         hdev->stat.acl_rx++;
5034
5035         hci_dev_lock(hdev);
5036         conn = hci_conn_hash_lookup_handle(hdev, handle);
5037         hci_dev_unlock(hdev);
5038
5039         if (conn) {
5040                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5041
5042                 /* Send to upper protocol */
5043                 l2cap_recv_acldata(conn, skb, flags);
5044                 return;
5045         }
5046
5047         BT_ERR("%s ACL packet for unknown connection handle %d",
5048                hdev->name, handle);
5049
5050         kfree_skb(skb);
5051 }
5052
5053 /* SCO data packet */
5054 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5055 {
5056         struct hci_sco_hdr *hdr = (void *) skb->data;
5057         struct hci_conn *conn;
5058         __u16 handle;
5059
5060         skb_pull(skb, HCI_SCO_HDR_SIZE);
5061
5062         handle = __le16_to_cpu(hdr->handle);
5063
5064         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5065
5066         hdev->stat.sco_rx++;
5067
5068         hci_dev_lock(hdev);
5069         conn = hci_conn_hash_lookup_handle(hdev, handle);
5070         hci_dev_unlock(hdev);
5071
5072         if (conn) {
5073                 /* Send to upper protocol */
5074                 sco_recv_scodata(conn, skb);
5075                 return;
5076         }
5077
5078         BT_ERR("%s SCO packet for unknown connection handle %d",
5079                hdev->name, handle);
5080
5081         kfree_skb(skb);
5082 }
5083
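     /* A request is complete when the command queue is empty or its head
      * marks the start of a new request.
      */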
5084 static bool hci_req_is_complete(struct hci_dev *hdev)
5085 {
5086         struct sk_buff *skb;
5087
5088         skb = skb_peek(&hdev->cmd_q);
5089         if (!skb)
5090                 return true;
5091
5092         return bt_cb(skb)->req.start;
5093 }
5094
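     /* Requeue a clone of the last sent command (anything but
      * HCI_OP_RESET) at the head of the command queue and kick the
      * command worker.
      */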
5095 static void hci_resend_last(struct hci_dev *hdev)
5096 {
5097         struct hci_command_hdr *sent;
5098         struct sk_buff *skb;
5099         u16 opcode;
5100
5101         if (!hdev->sent_cmd)
5102                 return;
5103
5104         sent = (void *) hdev->sent_cmd->data;
5105         opcode = __le16_to_cpu(sent->opcode);
5106         if (opcode == HCI_OP_RESET)
5107                 return;
5108
5109         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5110         if (!skb)
5111                 return;
5112
5113         skb_queue_head(&hdev->cmd_q, skb);
5114         queue_work(hdev->workqueue, &hdev->cmd_work);
5115 }
5116
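     /* Match a completed command against the request it belongs to and,
      * once the whole request has finished (or a command in it failed),
      * run the request's completion callback exactly once.
      */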
5117 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5118 {
5119         hci_req_complete_t req_complete = NULL;
5120         struct sk_buff *skb;
5121         unsigned long flags;
5122
5123         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5124
5125         /* If the completed command doesn't match the last one that was
5126          * sent, we need to handle it specially.
5127          */
5128         if (!hci_sent_cmd_data(hdev, opcode)) {
5129                 /* Some CSR based controllers generate a spontaneous
5130                  * reset complete event during init and any pending
5131                  * command will never be completed. In such a case we
5132                  * need to resend whatever was the last sent
5133                  * command.
5134                  */
5135                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5136                         hci_resend_last(hdev);
5137
5138                 return;
5139         }
5140
5141         /* If the command succeeded and there are still more commands in
5142          * this request, the request is not yet complete.
5143          */
5144         if (!status && !hci_req_is_complete(hdev))
5145                 return;
5146
5147         /* If this was the last command in a request, the complete
5148          * callback is found in hdev->sent_cmd instead of the
5149          * command queue (hdev->cmd_q).
5150          */
5151         if (hdev->sent_cmd) {
5152                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5153
5154                 if (req_complete) {
5155                         /* We must set the complete callback to NULL to
5156                          * avoid calling the callback more than once if
5157                          * this function gets called again.
5158                          */
5159                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5160
5161                         goto call_complete;
5162                 }
5163         }
5164
5165         /* Remove all pending commands belonging to this request */
5166         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5167         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5168                 if (bt_cb(skb)->req.start) {
5169                         __skb_queue_head(&hdev->cmd_q, skb);
5170                         break;
5171                 }
5172
5173                 req_complete = bt_cb(skb)->req.complete;
5174                 kfree_skb(skb);
5175         }
5176         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5177
5178 call_complete:
5179         if (req_complete)
5180                 req_complete(hdev, status);
5181 }
5182
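     /* RX work: drain the receive queue, mirroring every frame to the
      * monitor (and to sockets in promiscuous mode) before dispatching
      * it by packet type.
      */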
5183 static void hci_rx_work(struct work_struct *work)
5184 {
5185         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5186         struct sk_buff *skb;
5187
5188         BT_DBG("%s", hdev->name);
5189
5190         while ((skb = skb_dequeue(&hdev->rx_q))) {
5191                 /* Send copy to monitor */
5192                 hci_send_to_monitor(hdev, skb);
5193
5194                 if (atomic_read(&hdev->promisc)) {
5195                         /* Send copy to the sockets */
5196                         hci_send_to_sock(hdev, skb);
5197                 }
5198
5199                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5200                         kfree_skb(skb);
5201                         continue;
5202                 }
5203
5204                 if (test_bit(HCI_INIT, &hdev->flags)) {
5205                         /* Don't process data packets in these states. */
5206                         switch (bt_cb(skb)->pkt_type) {
5207                         case HCI_ACLDATA_PKT:
5208                         case HCI_SCODATA_PKT:
5209                                 kfree_skb(skb);
5210                                 continue;
5211                         }
5212                 }
5213
5214                 /* Process frame */
5215                 switch (bt_cb(skb)->pkt_type) {
5216                 case HCI_EVENT_PKT:
5217                         BT_DBG("%s Event packet", hdev->name);
5218                         hci_event_packet(hdev, skb);
5219                         break;
5220
5221                 case HCI_ACLDATA_PKT:
5222                         BT_DBG("%s ACL data packet", hdev->name);
5223                         hci_acldata_packet(hdev, skb);
5224                         break;
5225
5226                 case HCI_SCODATA_PKT:
5227                         BT_DBG("%s SCO data packet", hdev->name);
5228                         hci_scodata_packet(hdev, skb);
5229                         break;
5230
5231                 default:
5232                         kfree_skb(skb);
5233                         break;
5234                 }
5235         }
5236 }
5237
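     /* Command work: if the controller has a free command credit, send
      * the next queued command, keep a clone in hdev->sent_cmd for
      * completion matching, and arm the command timer unless a reset is
      * in flight.
      */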
5238 static void hci_cmd_work(struct work_struct *work)
5239 {
5240         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5241         struct sk_buff *skb;
5242
5243         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5244                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5245
5246         /* Send queued commands */
5247         if (atomic_read(&hdev->cmd_cnt)) {
5248                 skb = skb_dequeue(&hdev->cmd_q);
5249                 if (!skb)
5250                         return;
5251
5252                 kfree_skb(hdev->sent_cmd);
5253
5254                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5255                 if (hdev->sent_cmd) {
5256                         atomic_dec(&hdev->cmd_cnt);
5257                         hci_send_frame(hdev, skb);
5258                         if (test_bit(HCI_RESET, &hdev->flags))
5259                                 cancel_delayed_work(&hdev->cmd_timer);
5260                         else
5261                                 schedule_delayed_work(&hdev->cmd_timer,
5262                                                       HCI_CMD_TIMEOUT);
5263                 } else {
5264                         skb_queue_head(&hdev->cmd_q, skb);
5265                         queue_work(hdev->workqueue, &hdev->cmd_work);
5266                 }
5267         }
5268 }