2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
/* Work handlers for the RX, command and TX paths (defined later in file). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
/* Global list of all registered HCI devices, guarded by hci_dev_list_lock. */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);
/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ----- HCI requests ----- */
/* States for hdev->req_status while a synchronous request is in flight. */
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
/* Serialize synchronous requests against a single device. */
#define hci_req_lock(d) mutex_lock(&d->req_lock)
#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
66 /* ---- HCI notifications ---- */
/* Forward a device event (up/down/reg/unreg) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
73 /* ---- HCI debugfs entries ---- */
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
78 struct hci_dev *hdev = file->private_data;
81 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
90 struct hci_dev *hdev = file->private_data;
93 size_t buf_size = min(count, (sizeof(buf)-1));
97 if (!test_bit(HCI_UP, &hdev->flags))
100 if (copy_from_user(buf, user_buf, buf_size))
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 hci_req_unlock(hdev);
122 err = -bt_to_errno(skb->data[0]);
128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
133 static const struct file_operations dut_mode_fops = {
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
140 static int features_show(struct seq_file *f, void *ptr)
142 struct hci_dev *hdev = f->private;
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
161 hci_dev_unlock(hdev);
166 static int features_open(struct inode *inode, struct file *file)
168 return single_open(file, features_show, inode->i_private);
171 static const struct file_operations features_fops = {
172 .open = features_open,
175 .release = single_release,
178 static int blacklist_show(struct seq_file *f, void *p)
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
184 list_for_each_entry(b, &hdev->blacklist, list)
185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186 hci_dev_unlock(hdev);
191 static int blacklist_open(struct inode *inode, struct file *file)
193 return single_open(file, blacklist_show, inode->i_private);
196 static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
200 .release = single_release,
203 static int whitelist_show(struct seq_file *f, void *p)
205 struct hci_dev *hdev = f->private;
206 struct bdaddr_list *b;
209 list_for_each_entry(b, &hdev->whitelist, list)
210 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211 hci_dev_unlock(hdev);
216 static int whitelist_open(struct inode *inode, struct file *file)
218 return single_open(file, whitelist_show, inode->i_private);
221 static const struct file_operations whitelist_fops = {
222 .open = whitelist_open,
225 .release = single_release,
228 static int uuids_show(struct seq_file *f, void *p)
230 struct hci_dev *hdev = f->private;
231 struct bt_uuid *uuid;
234 list_for_each_entry(uuid, &hdev->uuids, list) {
237 /* The Bluetooth UUID values are stored in big endian,
238 * but with reversed byte order. So convert them into
239 * the right order for the %pUb modifier.
241 for (i = 0; i < 16; i++)
242 val[i] = uuid->uuid[15 - i];
244 seq_printf(f, "%pUb\n", val);
246 hci_dev_unlock(hdev);
251 static int uuids_open(struct inode *inode, struct file *file)
253 return single_open(file, uuids_show, inode->i_private);
256 static const struct file_operations uuids_fops = {
260 .release = single_release,
263 static int inquiry_cache_show(struct seq_file *f, void *p)
265 struct hci_dev *hdev = f->private;
266 struct discovery_state *cache = &hdev->discovery;
267 struct inquiry_entry *e;
271 list_for_each_entry(e, &cache->all, all) {
272 struct inquiry_data *data = &e->data;
273 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
275 data->pscan_rep_mode, data->pscan_period_mode,
276 data->pscan_mode, data->dev_class[2],
277 data->dev_class[1], data->dev_class[0],
278 __le16_to_cpu(data->clock_offset),
279 data->rssi, data->ssp_mode, e->timestamp);
282 hci_dev_unlock(hdev);
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
289 return single_open(file, inquiry_cache_show, inode->i_private);
292 static const struct file_operations inquiry_cache_fops = {
293 .open = inquiry_cache_open,
296 .release = single_release,
299 static int link_keys_show(struct seq_file *f, void *ptr)
301 struct hci_dev *hdev = f->private;
302 struct list_head *p, *n;
305 list_for_each_safe(p, n, &hdev->link_keys) {
306 struct link_key *key = list_entry(p, struct link_key, list);
307 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
310 hci_dev_unlock(hdev);
315 static int link_keys_open(struct inode *inode, struct file *file)
317 return single_open(file, link_keys_show, inode->i_private);
320 static const struct file_operations link_keys_fops = {
321 .open = link_keys_open,
324 .release = single_release,
327 static int dev_class_show(struct seq_file *f, void *ptr)
329 struct hci_dev *hdev = f->private;
332 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333 hdev->dev_class[1], hdev->dev_class[0]);
334 hci_dev_unlock(hdev);
339 static int dev_class_open(struct inode *inode, struct file *file)
341 return single_open(file, dev_class_show, inode->i_private);
344 static const struct file_operations dev_class_fops = {
345 .open = dev_class_open,
348 .release = single_release,
351 static int voice_setting_get(void *data, u64 *val)
353 struct hci_dev *hdev = data;
356 *val = hdev->voice_setting;
357 hci_dev_unlock(hdev);
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363 NULL, "0x%4.4llx\n");
365 static int auto_accept_delay_set(void *data, u64 val)
367 struct hci_dev *hdev = data;
370 hdev->auto_accept_delay = val;
371 hci_dev_unlock(hdev);
376 static int auto_accept_delay_get(void *data, u64 *val)
378 struct hci_dev *hdev = data;
381 *val = hdev->auto_accept_delay;
382 hci_dev_unlock(hdev);
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388 auto_accept_delay_set, "%llu\n");
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391 size_t count, loff_t *ppos)
393 struct hci_dev *hdev = file->private_data;
396 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
399 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
402 static ssize_t force_sc_support_write(struct file *file,
403 const char __user *user_buf,
404 size_t count, loff_t *ppos)
406 struct hci_dev *hdev = file->private_data;
408 size_t buf_size = min(count, (sizeof(buf)-1));
411 if (test_bit(HCI_UP, &hdev->flags))
414 if (copy_from_user(buf, user_buf, buf_size))
417 buf[buf_size] = '\0';
418 if (strtobool(buf, &enable))
421 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
424 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
429 static const struct file_operations force_sc_support_fops = {
431 .read = force_sc_support_read,
432 .write = force_sc_support_write,
433 .llseek = default_llseek,
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437 size_t count, loff_t *ppos)
439 struct hci_dev *hdev = file->private_data;
442 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
445 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
448 static const struct file_operations sc_only_mode_fops = {
450 .read = sc_only_mode_read,
451 .llseek = default_llseek,
454 static int idle_timeout_set(void *data, u64 val)
456 struct hci_dev *hdev = data;
458 if (val != 0 && (val < 500 || val > 3600000))
462 hdev->idle_timeout = val;
463 hci_dev_unlock(hdev);
468 static int idle_timeout_get(void *data, u64 *val)
470 struct hci_dev *hdev = data;
473 *val = hdev->idle_timeout;
474 hci_dev_unlock(hdev);
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480 idle_timeout_set, "%llu\n");
482 static int rpa_timeout_set(void *data, u64 val)
484 struct hci_dev *hdev = data;
486 /* Require the RPA timeout to be at least 30 seconds and at most
489 if (val < 30 || val > (60 * 60 * 24))
493 hdev->rpa_timeout = val;
494 hci_dev_unlock(hdev);
499 static int rpa_timeout_get(void *data, u64 *val)
501 struct hci_dev *hdev = data;
504 *val = hdev->rpa_timeout;
505 hci_dev_unlock(hdev);
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511 rpa_timeout_set, "%llu\n");
513 static int sniff_min_interval_set(void *data, u64 val)
515 struct hci_dev *hdev = data;
517 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
521 hdev->sniff_min_interval = val;
522 hci_dev_unlock(hdev);
527 static int sniff_min_interval_get(void *data, u64 *val)
529 struct hci_dev *hdev = data;
532 *val = hdev->sniff_min_interval;
533 hci_dev_unlock(hdev);
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539 sniff_min_interval_set, "%llu\n");
541 static int sniff_max_interval_set(void *data, u64 val)
543 struct hci_dev *hdev = data;
545 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
549 hdev->sniff_max_interval = val;
550 hci_dev_unlock(hdev);
555 static int sniff_max_interval_get(void *data, u64 *val)
557 struct hci_dev *hdev = data;
560 *val = hdev->sniff_max_interval;
561 hci_dev_unlock(hdev);
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567 sniff_max_interval_set, "%llu\n");
569 static int conn_info_min_age_set(void *data, u64 val)
571 struct hci_dev *hdev = data;
573 if (val == 0 || val > hdev->conn_info_max_age)
577 hdev->conn_info_min_age = val;
578 hci_dev_unlock(hdev);
583 static int conn_info_min_age_get(void *data, u64 *val)
585 struct hci_dev *hdev = data;
588 *val = hdev->conn_info_min_age;
589 hci_dev_unlock(hdev);
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595 conn_info_min_age_set, "%llu\n");
597 static int conn_info_max_age_set(void *data, u64 val)
599 struct hci_dev *hdev = data;
601 if (val == 0 || val < hdev->conn_info_min_age)
605 hdev->conn_info_max_age = val;
606 hci_dev_unlock(hdev);
611 static int conn_info_max_age_get(void *data, u64 *val)
613 struct hci_dev *hdev = data;
616 *val = hdev->conn_info_max_age;
617 hci_dev_unlock(hdev);
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623 conn_info_max_age_set, "%llu\n");
625 static int identity_show(struct seq_file *f, void *p)
627 struct hci_dev *hdev = f->private;
633 hci_copy_identity_address(hdev, &addr, &addr_type);
635 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636 16, hdev->irk, &hdev->rpa);
638 hci_dev_unlock(hdev);
643 static int identity_open(struct inode *inode, struct file *file)
645 return single_open(file, identity_show, inode->i_private);
648 static const struct file_operations identity_fops = {
649 .open = identity_open,
652 .release = single_release,
655 static int random_address_show(struct seq_file *f, void *p)
657 struct hci_dev *hdev = f->private;
660 seq_printf(f, "%pMR\n", &hdev->random_addr);
661 hci_dev_unlock(hdev);
666 static int random_address_open(struct inode *inode, struct file *file)
668 return single_open(file, random_address_show, inode->i_private);
671 static const struct file_operations random_address_fops = {
672 .open = random_address_open,
675 .release = single_release,
678 static int static_address_show(struct seq_file *f, void *p)
680 struct hci_dev *hdev = f->private;
683 seq_printf(f, "%pMR\n", &hdev->static_addr);
684 hci_dev_unlock(hdev);
689 static int static_address_open(struct inode *inode, struct file *file)
691 return single_open(file, static_address_show, inode->i_private);
694 static const struct file_operations static_address_fops = {
695 .open = static_address_open,
698 .release = single_release,
701 static ssize_t force_static_address_read(struct file *file,
702 char __user *user_buf,
703 size_t count, loff_t *ppos)
705 struct hci_dev *hdev = file->private_data;
708 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
711 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
714 static ssize_t force_static_address_write(struct file *file,
715 const char __user *user_buf,
716 size_t count, loff_t *ppos)
718 struct hci_dev *hdev = file->private_data;
720 size_t buf_size = min(count, (sizeof(buf)-1));
723 if (test_bit(HCI_UP, &hdev->flags))
726 if (copy_from_user(buf, user_buf, buf_size))
729 buf[buf_size] = '\0';
730 if (strtobool(buf, &enable))
733 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
736 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
741 static const struct file_operations force_static_address_fops = {
743 .read = force_static_address_read,
744 .write = force_static_address_write,
745 .llseek = default_llseek,
748 static int white_list_show(struct seq_file *f, void *ptr)
750 struct hci_dev *hdev = f->private;
751 struct bdaddr_list *b;
754 list_for_each_entry(b, &hdev->le_white_list, list)
755 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756 hci_dev_unlock(hdev);
761 static int white_list_open(struct inode *inode, struct file *file)
763 return single_open(file, white_list_show, inode->i_private);
766 static const struct file_operations white_list_fops = {
767 .open = white_list_open,
770 .release = single_release,
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
775 struct hci_dev *hdev = f->private;
776 struct list_head *p, *n;
779 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782 &irk->bdaddr, irk->addr_type,
783 16, irk->val, &irk->rpa);
785 hci_dev_unlock(hdev);
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
792 return single_open(file, identity_resolving_keys_show,
796 static const struct file_operations identity_resolving_keys_fops = {
797 .open = identity_resolving_keys_open,
800 .release = single_release,
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
805 struct hci_dev *hdev = f->private;
806 struct list_head *p, *n;
809 list_for_each_safe(p, n, &hdev->long_term_keys) {
810 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814 __le64_to_cpu(ltk->rand), 16, ltk->val);
816 hci_dev_unlock(hdev);
821 static int long_term_keys_open(struct inode *inode, struct file *file)
823 return single_open(file, long_term_keys_show, inode->i_private);
826 static const struct file_operations long_term_keys_fops = {
827 .open = long_term_keys_open,
830 .release = single_release,
833 static int conn_min_interval_set(void *data, u64 val)
835 struct hci_dev *hdev = data;
837 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
841 hdev->le_conn_min_interval = val;
842 hci_dev_unlock(hdev);
847 static int conn_min_interval_get(void *data, u64 *val)
849 struct hci_dev *hdev = data;
852 *val = hdev->le_conn_min_interval;
853 hci_dev_unlock(hdev);
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859 conn_min_interval_set, "%llu\n");
861 static int conn_max_interval_set(void *data, u64 val)
863 struct hci_dev *hdev = data;
865 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
869 hdev->le_conn_max_interval = val;
870 hci_dev_unlock(hdev);
875 static int conn_max_interval_get(void *data, u64 *val)
877 struct hci_dev *hdev = data;
880 *val = hdev->le_conn_max_interval;
881 hci_dev_unlock(hdev);
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887 conn_max_interval_set, "%llu\n");
889 static int conn_latency_set(void *data, u64 val)
891 struct hci_dev *hdev = data;
897 hdev->le_conn_latency = val;
898 hci_dev_unlock(hdev);
903 static int conn_latency_get(void *data, u64 *val)
905 struct hci_dev *hdev = data;
908 *val = hdev->le_conn_latency;
909 hci_dev_unlock(hdev);
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915 conn_latency_set, "%llu\n");
917 static int supervision_timeout_set(void *data, u64 val)
919 struct hci_dev *hdev = data;
921 if (val < 0x000a || val > 0x0c80)
925 hdev->le_supv_timeout = val;
926 hci_dev_unlock(hdev);
931 static int supervision_timeout_get(void *data, u64 *val)
933 struct hci_dev *hdev = data;
936 *val = hdev->le_supv_timeout;
937 hci_dev_unlock(hdev);
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943 supervision_timeout_set, "%llu\n");
945 static int adv_channel_map_set(void *data, u64 val)
947 struct hci_dev *hdev = data;
949 if (val < 0x01 || val > 0x07)
953 hdev->le_adv_channel_map = val;
954 hci_dev_unlock(hdev);
959 static int adv_channel_map_get(void *data, u64 *val)
961 struct hci_dev *hdev = data;
964 *val = hdev->le_adv_channel_map;
965 hci_dev_unlock(hdev);
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971 adv_channel_map_set, "%llu\n");
973 static int adv_min_interval_set(void *data, u64 val)
975 struct hci_dev *hdev = data;
977 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
981 hdev->le_adv_min_interval = val;
982 hci_dev_unlock(hdev);
987 static int adv_min_interval_get(void *data, u64 *val)
989 struct hci_dev *hdev = data;
992 *val = hdev->le_adv_min_interval;
993 hci_dev_unlock(hdev);
998 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999 adv_min_interval_set, "%llu\n");
1001 static int adv_max_interval_set(void *data, u64 val)
1003 struct hci_dev *hdev = data;
1005 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1009 hdev->le_adv_max_interval = val;
1010 hci_dev_unlock(hdev);
1015 static int adv_max_interval_get(void *data, u64 *val)
1017 struct hci_dev *hdev = data;
1020 *val = hdev->le_adv_max_interval;
1021 hci_dev_unlock(hdev);
1026 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027 adv_max_interval_set, "%llu\n");
1029 static int device_list_show(struct seq_file *f, void *ptr)
1031 struct hci_dev *hdev = f->private;
1032 struct hci_conn_params *p;
1035 list_for_each_entry(p, &hdev->le_conn_params, list) {
1036 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1039 hci_dev_unlock(hdev);
1044 static int device_list_open(struct inode *inode, struct file *file)
1046 return single_open(file, device_list_show, inode->i_private);
1049 static const struct file_operations device_list_fops = {
1050 .open = device_list_open,
1052 .llseek = seq_lseek,
1053 .release = single_release,
1056 /* ---- HCI requests ---- */
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1060 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1062 if (hdev->req_status == HCI_REQ_PEND) {
1063 hdev->req_result = result;
1064 hdev->req_status = HCI_REQ_DONE;
1065 wake_up_interruptible(&hdev->req_wait_q);
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1071 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1073 if (hdev->req_status == HCI_REQ_PEND) {
1074 hdev->req_result = err;
1075 hdev->req_status = HCI_REQ_CANCELED;
1076 wake_up_interruptible(&hdev->req_wait_q);
1080 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1083 struct hci_ev_cmd_complete *ev;
1084 struct hci_event_hdr *hdr;
1085 struct sk_buff *skb;
1089 skb = hdev->recv_evt;
1090 hdev->recv_evt = NULL;
1092 hci_dev_unlock(hdev);
1095 return ERR_PTR(-ENODATA);
1097 if (skb->len < sizeof(*hdr)) {
1098 BT_ERR("Too short HCI event");
1102 hdr = (void *) skb->data;
1103 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1106 if (hdr->evt != event)
1111 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1112 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1116 if (skb->len < sizeof(*ev)) {
1117 BT_ERR("Too short cmd_complete event");
1121 ev = (void *) skb->data;
1122 skb_pull(skb, sizeof(*ev));
1124 if (opcode == __le16_to_cpu(ev->opcode))
1127 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1128 __le16_to_cpu(ev->opcode));
1132 return ERR_PTR(-ENODATA);
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136 const void *param, u8 event, u32 timeout)
1138 DECLARE_WAITQUEUE(wait, current);
1139 struct hci_request req;
1142 BT_DBG("%s", hdev->name);
1144 hci_req_init(&req, hdev);
1146 hci_req_add_ev(&req, opcode, plen, param, event);
1148 hdev->req_status = HCI_REQ_PEND;
1150 err = hci_req_run(&req, hci_req_sync_complete);
1152 return ERR_PTR(err);
1154 add_wait_queue(&hdev->req_wait_q, &wait);
1155 set_current_state(TASK_INTERRUPTIBLE);
1157 schedule_timeout(timeout);
1159 remove_wait_queue(&hdev->req_wait_q, &wait);
1161 if (signal_pending(current))
1162 return ERR_PTR(-EINTR);
1164 switch (hdev->req_status) {
1166 err = -bt_to_errno(hdev->req_result);
1169 case HCI_REQ_CANCELED:
1170 err = -hdev->req_result;
1178 hdev->req_status = hdev->req_result = 0;
1180 BT_DBG("%s end: err %d", hdev->name, err);
1183 return ERR_PTR(err);
1185 return hci_get_cmd_complete(hdev, opcode, event);
1187 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1189 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1190 const void *param, u32 timeout)
1192 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1194 EXPORT_SYMBOL(__hci_cmd_sync);
1196 /* Execute request and wait for completion. */
1197 static int __hci_req_sync(struct hci_dev *hdev,
1198 void (*func)(struct hci_request *req,
1200 unsigned long opt, __u32 timeout)
1202 struct hci_request req;
1203 DECLARE_WAITQUEUE(wait, current);
1206 BT_DBG("%s start", hdev->name);
1208 hci_req_init(&req, hdev);
1210 hdev->req_status = HCI_REQ_PEND;
1214 err = hci_req_run(&req, hci_req_sync_complete);
1216 hdev->req_status = 0;
1218 /* ENODATA means the HCI request command queue is empty.
1219 * This can happen when a request with conditionals doesn't
1220 * trigger any commands to be sent. This is normal behavior
1221 * and should not trigger an error return.
1223 if (err == -ENODATA)
1229 add_wait_queue(&hdev->req_wait_q, &wait);
1230 set_current_state(TASK_INTERRUPTIBLE);
1232 schedule_timeout(timeout);
1234 remove_wait_queue(&hdev->req_wait_q, &wait);
1236 if (signal_pending(current))
1239 switch (hdev->req_status) {
1241 err = -bt_to_errno(hdev->req_result);
1244 case HCI_REQ_CANCELED:
1245 err = -hdev->req_result;
1253 hdev->req_status = hdev->req_result = 0;
1255 BT_DBG("%s end: err %d", hdev->name, err);
1260 static int hci_req_sync(struct hci_dev *hdev,
1261 void (*req)(struct hci_request *req,
1263 unsigned long opt, __u32 timeout)
1267 if (!test_bit(HCI_UP, &hdev->flags))
1270 /* Serialize all requests */
1272 ret = __hci_req_sync(hdev, req, opt, timeout);
1273 hci_req_unlock(hdev);
1278 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1280 BT_DBG("%s %ld", req->hdev->name, opt);
1283 set_bit(HCI_RESET, &req->hdev->flags);
1284 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1287 static void bredr_init(struct hci_request *req)
1289 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1291 /* Read Local Supported Features */
1292 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1294 /* Read Local Version */
1295 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1297 /* Read BD Address */
1298 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1301 static void amp_init(struct hci_request *req)
1303 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1305 /* Read Local Version */
1306 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1308 /* Read Local Supported Commands */
1309 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1311 /* Read Local Supported Features */
1312 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1314 /* Read Local AMP Info */
1315 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1317 /* Read Data Blk size */
1318 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1320 /* Read Flow Control Mode */
1321 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1323 /* Read Location Data */
1324 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1327 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1329 struct hci_dev *hdev = req->hdev;
1331 BT_DBG("%s %ld", hdev->name, opt);
1334 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1335 hci_reset_req(req, 0);
1337 switch (hdev->dev_type) {
1347 BT_ERR("Unknown device type %d", hdev->dev_type);
1352 static void bredr_setup(struct hci_request *req)
1354 struct hci_dev *hdev = req->hdev;
1359 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1360 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1362 /* Read Class of Device */
1363 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1365 /* Read Local Name */
1366 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1368 /* Read Voice Setting */
1369 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1371 /* Read Number of Supported IAC */
1372 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1374 /* Read Current IAC LAP */
1375 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1377 /* Clear Event Filters */
1378 flt_type = HCI_FLT_CLEAR_ALL;
1379 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1381 /* Connection accept timeout ~20 secs */
1382 param = cpu_to_le16(0x7d00);
1383 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
1385 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1386 * but it does not support page scan related HCI commands.
1388 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1389 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1390 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1394 static void le_setup(struct hci_request *req)
1396 struct hci_dev *hdev = req->hdev;
1398 /* Read LE Buffer Size */
1399 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1401 /* Read LE Local Supported Features */
1402 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1404 /* Read LE Supported States */
1405 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1407 /* Read LE White List Size */
1408 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1410 /* Clear LE White List */
1411 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1413 /* LE-only controllers have LE implicitly enabled */
1414 if (!lmp_bredr_capable(hdev))
1415 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1420 if (lmp_ext_inq_capable(hdev))
1423 if (lmp_inq_rssi_capable(hdev))
1426 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427 hdev->lmp_subver == 0x0757)
1430 if (hdev->manufacturer == 15) {
1431 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1433 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1435 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1439 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440 hdev->lmp_subver == 0x1805)
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1450 mode = hci_get_inquiry_mode(req->hdev);
1452 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1455 static void hci_setup_event_mask(struct hci_request *req)
1457 struct hci_dev *hdev = req->hdev;
1459 /* The second byte is 0xff instead of 0x9f (two reserved bits
1460 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461 * command otherwise.
1463 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1465 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1466 * any event mask for pre 1.2 devices.
1468 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1471 if (lmp_bredr_capable(hdev)) {
1472 events[4] |= 0x01; /* Flow Specification Complete */
1473 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475 events[5] |= 0x08; /* Synchronous Connection Complete */
1476 events[5] |= 0x10; /* Synchronous Connection Changed */
1478 /* Use a different default for LE-only devices */
1479 memset(events, 0, sizeof(events));
1480 events[0] |= 0x10; /* Disconnection Complete */
1481 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482 events[1] |= 0x20; /* Command Complete */
1483 events[1] |= 0x40; /* Command Status */
1484 events[1] |= 0x80; /* Hardware Error */
1485 events[2] |= 0x04; /* Number of Completed Packets */
1486 events[3] |= 0x02; /* Data Buffer Overflow */
1488 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489 events[0] |= 0x80; /* Encryption Change */
1490 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1494 if (lmp_inq_rssi_capable(hdev))
1495 events[4] |= 0x02; /* Inquiry Result with RSSI */
1497 if (lmp_sniffsubr_capable(hdev))
1498 events[5] |= 0x20; /* Sniff Subrating */
1500 if (lmp_pause_enc_capable(hdev))
1501 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1503 if (lmp_ext_inq_capable(hdev))
1504 events[5] |= 0x40; /* Extended Inquiry Result */
1506 if (lmp_no_flush_capable(hdev))
1507 events[7] |= 0x01; /* Enhanced Flush Complete */
1509 if (lmp_lsto_capable(hdev))
1510 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1512 if (lmp_ssp_capable(hdev)) {
1513 events[6] |= 0x01; /* IO Capability Request */
1514 events[6] |= 0x02; /* IO Capability Response */
1515 events[6] |= 0x04; /* User Confirmation Request */
1516 events[6] |= 0x08; /* User Passkey Request */
1517 events[6] |= 0x10; /* Remote OOB Data Request */
1518 events[6] |= 0x20; /* Simple Pairing Complete */
1519 events[7] |= 0x04; /* User Passkey Notification */
1520 events[7] |= 0x08; /* Keypress Notification */
1521 events[7] |= 0x10; /* Remote Host Supported
1522 * Features Notification
1526 if (lmp_le_capable(hdev))
1527 events[7] |= 0x20; /* LE Meta-Event */
1529 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 controller init: queue feature-dependent HCI commands
 * (SSP mode / EIR, inquiry mode, extended features, auth enable).
 * NOTE(review): this listing is elided — braces/else-arms between the
 * numbered lines are not visible here.
 */
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1534 struct hci_dev *hdev = req->hdev;
1536 if (lmp_bredr_capable(hdev))
1539 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1541 if (lmp_le_capable(hdev))
1544 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545 * local supported commands HCI command.
1547 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1550 if (lmp_ssp_capable(hdev)) {
1551 /* When SSP is available, then the host features page
1552 * should also be available as well. However some
1553 * controllers list the max_page as 0 as long as SSP
1554 * has not been enabled. To achieve proper debugging
1555 * output, force the minimum max_page to 1 at least.
1557 hdev->max_page = 0x01;
1559 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1561 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562 sizeof(mode), &mode);
1564 struct hci_cp_write_eir cp;
/* Clear cached EIR data before sending an all-zero Write EIR command */
1566 memset(hdev->eir, 0, sizeof(hdev->eir));
1567 memset(&cp, 0, sizeof(cp));
1569 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1573 if (lmp_inq_rssi_capable(hdev))
1574 hci_setup_inquiry_mode(req);
1576 if (lmp_inq_tx_pwr_capable(hdev))
1577 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1579 if (lmp_ext_feat_capable(hdev)) {
1580 struct hci_cp_read_local_ext_features cp;
1583 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1587 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1589 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link-policy bitmask from the controller's LMP
 * features and queue a Write Default Link Policy Settings command.
 */
1594 static void hci_setup_link_policy(struct hci_request *req)
1596 struct hci_dev *hdev = req->hdev;
1597 struct hci_cp_write_def_link_policy cp;
1598 u16 link_policy = 0;
1600 if (lmp_rswitch_capable(hdev))
1601 link_policy |= HCI_LP_RSWITCH;
1602 if (lmp_hold_capable(hdev))
1603 link_policy |= HCI_LP_HOLD;
1604 if (lmp_sniff_capable(hdev))
1605 link_policy |= HCI_LP_SNIFF;
1606 if (lmp_park_capable(hdev))
1607 link_policy |= HCI_LP_PARK;
1609 cp.policy = cpu_to_le16(link_policy);
1610 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported if the host-side LE setting differs
 * from what the controller currently reports. Skipped on LE-only
 * controllers, which cannot toggle this.
 */
1613 static void hci_set_le_support(struct hci_request *req)
1615 struct hci_dev *hdev = req->hdev;
1616 struct hci_cp_write_le_host_supported cp;
1618 /* LE-only devices do not support explicit enablement */
1619 if (!lmp_bredr_capable(hdev))
1622 memset(&cp, 0, sizeof(cp));
1624 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* Only send the command when the desired value actually changes */
1629 if (cp.le != lmp_host_le_capable(hdev))
1630 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue HCI Set Event Mask Page 2 based on CSB master/slave
 * and ping (Authenticated Payload Timeout) capabilities.
 */
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1636 struct hci_dev *hdev = req->hdev;
1637 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1639 /* If Connectionless Slave Broadcast master role is supported
1640 * enable all necessary events for it.
1642 if (lmp_csb_master_capable(hdev)) {
1643 events[1] |= 0x40; /* Triggered Clock Capture */
1644 events[1] |= 0x80; /* Synchronization Train Complete */
1645 events[2] |= 0x10; /* Slave Page Response Timeout */
1646 events[2] |= 0x20; /* CSB Channel Map Change */
1649 /* If Connectionless Slave Broadcast slave role is supported
1650 * enable all necessary events for it.
1652 if (lmp_csb_slave_capable(hdev)) {
1653 events[2] |= 0x01; /* Synchronization Train Received */
1654 events[2] |= 0x02; /* CSB Receive */
1655 events[2] |= 0x04; /* CSB Timeout */
1656 events[2] |= 0x08; /* Truncated Page Complete */
1659 /* Enable Authenticated Payload Timeout Expired event if supported */
1660 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1663 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 controller init: event mask, stored-link-key cleanup, link
 * policy, LE event mask / TX power / host support, and reading of
 * extended feature pages beyond page 1.
 */
1666 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1668 struct hci_dev *hdev = req->hdev;
1671 hci_setup_event_mask(req);
1673 /* Some Broadcom based Bluetooth controllers do not support the
1674 * Delete Stored Link Key command. They are clearly indicating its
1675 * absence in the bit mask of supported commands.
1677 * Check the supported commands and only if the command is marked
1678 * as supported send it. If not supported assume that the controller
1679 * does not have actual support for stored link keys which makes this
1680 * command redundant anyway.
1682 * Some controllers indicate that they support handling deleting
1683 * stored link keys, but they don't. The quirk lets a driver
1684 * just disable this command.
1686 if (hdev->commands[6] & 0x80 &&
1687 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1688 struct hci_cp_delete_stored_link_key cp;
1690 bacpy(&cp.bdaddr, BDADDR_ANY);
1691 cp.delete_all = 0x01;
1692 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* commands[5] bit 0x10: Write Default Link Policy Settings supported */
1696 if (hdev->commands[5] & 0x10)
1697 hci_setup_link_policy(req);
1699 if (lmp_le_capable(hdev)) {
1702 memset(events, 0, sizeof(events));
1705 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706 events[0] |= 0x10; /* LE Long Term Key Request */
1708 /* If controller supports the Connection Parameters Request
1709 * Link Layer Procedure, enable the corresponding event.
1711 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712 events[0] |= 0x20; /* LE Remote Connection
1716 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1719 if (hdev->commands[25] & 0x40) {
1720 /* Read LE Advertising Channel TX Power */
1721 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1724 hci_set_le_support(req);
1727 /* Read features beyond page 1 if available */
1728 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729 struct hci_cp_read_local_ext_features cp;
1732 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 controller init: optional commands gated on the supported
 * commands bitmask (event mask page 2, codecs, MWS config, sync train)
 * and Secure Connections enablement.
 */
1737 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1739 struct hci_dev *hdev = req->hdev;
1741 /* Set event mask page 2 if the HCI command for it is supported */
1742 if (hdev->commands[22] & 0x04)
1743 hci_set_event_mask_page_2(req);
1745 /* Read local codec list if the HCI command is supported */
1746 if (hdev->commands[29] & 0x20)
1747 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1749 /* Get MWS transport configuration if the HCI command is supported */
1750 if (hdev->commands[30] & 0x08)
1751 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1753 /* Check for Synchronization Train support */
1754 if (lmp_sync_train_capable(hdev))
1755 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1757 /* Enable Secure Connections if supported and configured */
1758 if ((lmp_sc_capable(hdev) ||
1759 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1760 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1762 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763 sizeof(support), &support);
/* Run the four synchronous init stages against the controller and,
 * during initial setup only, create the per-device debugfs entries.
 * Returns 0 on success or a negative errno from __hci_req_sync().
 */
1767 static int __hci_init(struct hci_dev *hdev)
1771 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1775 /* The Device Under Test (DUT) mode is special and available for
1776 * all controller types. So just create it early on.
1778 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1783 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1784 * BR/EDR/LE type controllers. AMP controllers only need the
/* AMP controllers stop after stage 1 — stages 2-4 are BR/EDR/LE only */
1787 if (hdev->dev_type != HCI_BREDR)
1790 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1794 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1798 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1802 /* Only create debugfs entries during the initial setup
1803 * phase and not every time the controller gets powered on.
1805 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1808 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1810 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811 &hdev->manufacturer);
1812 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1816 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1818 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1820 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821 &conn_info_min_age_fops);
1822 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823 &conn_info_max_age_fops);
/* BR/EDR-specific debugfs entries */
1825 if (lmp_bredr_capable(hdev)) {
1826 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827 hdev, &inquiry_cache_fops);
1828 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829 hdev, &link_keys_fops);
1830 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831 hdev, &dev_class_fops);
1832 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833 hdev, &voice_setting_fops);
/* SSP-specific debugfs entries */
1836 if (lmp_ssp_capable(hdev)) {
1837 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838 hdev, &auto_accept_delay_fops);
1839 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840 hdev, &force_sc_support_fops);
1841 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842 hdev, &sc_only_mode_fops);
/* Sniff-mode tuning entries */
1845 if (lmp_sniff_capable(hdev)) {
1846 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847 hdev, &idle_timeout_fops);
1848 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849 hdev, &sniff_min_interval_fops);
1850 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851 hdev, &sniff_max_interval_fops);
/* LE-specific debugfs entries */
1854 if (lmp_le_capable(hdev)) {
1855 debugfs_create_file("identity", 0400, hdev->debugfs,
1856 hdev, &identity_fops);
1857 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858 hdev, &rpa_timeout_fops);
1859 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860 hdev, &random_address_fops);
1861 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862 hdev, &static_address_fops);
1864 /* For controllers with a public address, provide a debug
1865 * option to force the usage of the configured static
1866 * address. By default the public address is used.
1868 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869 debugfs_create_file("force_static_address", 0644,
1870 hdev->debugfs, hdev,
1871 &force_static_address_fops);
1873 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874 &hdev->le_white_list_size);
1875 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1877 debugfs_create_file("identity_resolving_keys", 0400,
1878 hdev->debugfs, hdev,
1879 &identity_resolving_keys_fops);
1880 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881 hdev, &long_term_keys_fops);
1882 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883 hdev, &conn_min_interval_fops);
1884 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885 hdev, &conn_max_interval_fops);
1886 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887 hdev, &conn_latency_fops);
1888 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889 hdev, &supervision_timeout_fops);
1890 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891 hdev, &adv_channel_map_fops);
1892 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893 hdev, &adv_min_interval_fops);
1894 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895 hdev, &adv_max_interval_fops);
1896 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1898 debugfs_create_u16("discov_interleaved_timeout", 0644,
1900 &hdev->discov_interleaved_timeout);
/* Stage-0 init used for unconfigured controllers: optional reset, then
 * read the local version and (when the driver can change it) BD_ADDR.
 */
1906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1908 struct hci_dev *hdev = req->hdev;
1910 BT_DBG("%s %ld", hdev->name, opt);
/* Reset first unless the driver asked for reset only on close */
1913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1914 hci_reset_req(req, 0);
1916 /* Read Local Version */
1917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1919 /* Read BD Address */
1920 if (hdev->set_bdaddr)
1921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Minimal synchronous init for unconfigured controllers; skipped
 * entirely for raw devices. Returns negative errno on failure.
 */
1924 static int __hci_unconf_init(struct hci_dev *hdev)
1928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request builder: write the scan-enable value passed via opt. */
1938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1942 BT_DBG("%s %x", req->hdev->name, scan);
1944 /* Inquiry and Page scans */
1945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication-enable value from opt. */
1948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1952 BT_DBG("%s %x", req->hdev->name, auth);
1954 /* Authentication */
1955 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption-mode value from opt. */
1958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1962 BT_DBG("%s %x", req->hdev->name, encrypt);
1965 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy (little-endian u16)
 * taken from opt.
 */
1968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1970 __le16 policy = cpu_to_le16(opt);
1972 BT_DBG("%s %x", req->hdev->name, policy);
1974 /* Default link policy */
1975 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1978 /* Get HCI device by index.
1979 * Device is held on return. */
/* Returns the hci_dev with matching id (reference held via
 * hci_dev_hold()) or NULL if no such device is registered.
 */
1980 struct hci_dev *hci_dev_get(int index)
1982 struct hci_dev *hdev = NULL, *d;
1984 BT_DBG("%d", index);
/* Walk the global device list under the read lock */
1989 read_lock(&hci_dev_list_lock);
1990 list_for_each_entry(d, &hci_dev_list, list) {
1991 if (d->id == index) {
1992 hdev = hci_dev_hold(d);
1996 read_unlock(&hci_dev_list_lock);
2000 /* ---- Inquiry support ---- */
/* True while discovery is in the FINDING or RESOLVING state. */
2002 bool hci_discovery_active(struct hci_dev *hdev)
2004 struct discovery_state *discov = &hdev->discovery;
2006 switch (discov->state) {
2007 case DISCOVERY_FINDING:
2008 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events; a no-op if the state is unchanged.
 */
2016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2018 int old_state = hdev->discovery.state;
2020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2022 if (old_state == state)
2025 hdev->discovery.state = state;
2028 case DISCOVERY_STOPPED:
2029 hci_update_background_scan(hdev);
/* Don't report "stopped" if discovery never actually started */
2031 if (old_state != DISCOVERY_STARTING)
2032 mgmt_discovering(hdev, 0);
2034 case DISCOVERY_STARTING:
2036 case DISCOVERY_FINDING:
2037 mgmt_discovering(hdev, 1);
2039 case DISCOVERY_RESOLVING:
2041 case DISCOVERY_STOPPING:
/* Free every inquiry-cache entry and reset the unknown/resolve lists. */
2046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_entry *p, *n;
2051 list_for_each_entry_safe(p, n, &cache->all, all) {
2056 INIT_LIST_HEAD(&cache->unknown);
2057 INIT_LIST_HEAD(&cache->resolve);
/* Look up an inquiry-cache entry by BD address in the "all" list;
 * returns NULL when not found.
 */
2060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2063 struct discovery_state *cache = &hdev->discovery;
2064 struct inquiry_entry *e;
2066 BT_DBG("cache %p, %pMR", cache, bdaddr);
2068 list_for_each_entry(e, &cache->all, all) {
2069 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up an entry whose remote name is still unknown (on the
 * "unknown" list) by BD address; returns NULL when not found.
 */
2076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2079 struct discovery_state *cache = &hdev->discovery;
2080 struct inquiry_entry *e;
2082 BT_DBG("cache %p, %pMR", cache, bdaddr);
2084 list_for_each_entry(e, &cache->unknown, list) {
2085 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Look up an entry on the name-resolve list. Passing BDADDR_ANY
 * matches the first entry whose name_state equals @state; otherwise
 * match by exact BD address.
 */
2092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2096 struct discovery_state *cache = &hdev->discovery;
2097 struct inquiry_entry *e;
2099 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2101 list_for_each_entry(e, &cache->resolve, list) {
2102 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2104 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list keeping it ordered so that
 * pending names come first and stronger (higher |RSSI|) signals are
 * resolved earlier.
 */
2111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2112 struct inquiry_entry *ie)
2114 struct discovery_state *cache = &hdev->discovery;
2115 struct list_head *pos = &cache->resolve;
2116 struct inquiry_entry *p;
2118 list_del(&ie->list);
/* Find the insertion point: after the last non-pending, weaker entry */
2120 list_for_each_entry(p, &cache->resolve, list) {
2121 if (p->name_state != NAME_PENDING &&
2122 abs(p->data.rssi) >= abs(ie->data.rssi))
2127 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for @data and return the
 * MGMT_DEV_FOUND_* flags (legacy pairing / confirm-name) describing
 * what userspace should be told about the device.
 */
2130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2133 struct discovery_state *cache = &hdev->discovery;
2134 struct inquiry_entry *ie;
2137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for the device */
2139 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2141 if (!data->ssp_mode)
2142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2146 if (!ie->data.ssp_mode)
2147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name lookup is still needed: reorder resolve list */
2149 if (ie->name_state == NAME_NEEDED &&
2150 data->rssi != ie->data.rssi) {
2151 ie->data.rssi = data->rssi;
2152 hci_inquiry_cache_update_resolve(hdev, ie);
2158 /* Entry not in the cache. Add new one. */
2159 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2165 list_add(&ie->all, &cache->all);
2168 ie->name_state = NAME_KNOWN;
2170 ie->name_state = NAME_NOT_KNOWN;
2171 list_add(&ie->list, &cache->unknown);
/* Name became known for an entry that was waiting on resolution */
2175 if (name_known && ie->name_state != NAME_KNOWN &&
2176 ie->name_state != NAME_PENDING) {
2177 ie->name_state = NAME_KNOWN;
2178 list_del(&ie->list);
2181 memcpy(&ie->data, data, sizeof(*data));
2182 ie->timestamp = jiffies;
2183 cache->timestamp = jiffies;
2185 if (ie->name_state == NAME_NOT_KNOWN)
2186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number of entries copied.
 */
2192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2194 struct discovery_state *cache = &hdev->discovery;
2195 struct inquiry_info *info = (struct inquiry_info *) buf;
2196 struct inquiry_entry *e;
2199 list_for_each_entry(e, &cache->all, all) {
2200 struct inquiry_data *data = &e->data;
2205 bacpy(&info->bdaddr, &data->bdaddr);
2206 info->pscan_rep_mode = data->pscan_rep_mode;
2207 info->pscan_period_mode = data->pscan_period_mode;
2208 info->pscan_mode = data->pscan_mode;
2209 memcpy(info->dev_class, data->dev_class, 3);
2210 info->clock_offset = data->clock_offset;
2216 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: start an HCI Inquiry with the LAP/length/num_rsp
 * taken from the hci_inquiry_req passed via opt. No-op if an inquiry
 * is already running.
 */
2220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2222 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2223 struct hci_dev *hdev = req->hdev;
2224 struct hci_cp_inquiry cp;
2226 BT_DBG("%s", hdev->name);
2228 if (test_bit(HCI_INQUIRY, &hdev->flags))
2232 memcpy(&cp.lap, &ir->lap, 3);
2233 cp.length = ir->length;
2234 cp.num_rsp = ir->num_rsp;
2235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit() action: interruptible wait for the inquiry to finish. */
2238 static int wait_inquiry(void *word)
2241 return signal_pending(current);
/* HCIINQUIRY ioctl handler: optionally flush the cache, run a new
 * inquiry, then copy cached results back to userspace. Returns 0 or a
 * negative errno (-EFAULT, -EINTR, -EBUSY, ...).
 */
2244 int hci_inquiry(void __user *arg)
2246 __u8 __user *ptr = arg;
2247 struct hci_inquiry_req ir;
2248 struct hci_dev *hdev;
2249 int err = 0, do_inquiry = 0, max_rsp;
2253 if (copy_from_user(&ir, ptr, sizeof(ir)))
2256 hdev = hci_dev_get(ir.dev_id);
/* Reject devices claimed by a user channel, unconfigured devices,
 * AMP controllers, and devices with BR/EDR disabled. */
2260 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2265 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2270 if (hdev->dev_type != HCI_BREDR) {
2275 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush a stale/empty cache (or on explicit request) and re-inquire */
2281 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2282 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2283 hci_inquiry_cache_flush(hdev);
2286 hci_dev_unlock(hdev);
2288 timeo = ir.length * msecs_to_jiffies(2000);
2291 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2296 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2297 * cleared). If it is interrupted by a signal, return -EINTR.
2299 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2300 TASK_INTERRUPTIBLE))
2304 /* for unlimited number of responses we will use buffer with
2307 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2309 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2310 * copy it to the user space.
2312 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2319 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2320 hci_dev_unlock(hdev);
2322 BT_DBG("num_rsp %d", ir.num_rsp);
2324 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2326 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power on and initialise a controller: driver open(), optional
 * setup()/unconfigured init, address programming, __hci_init(), then
 * either HCI_DEV_UP notification or full cleanup on failure.
 * Returns 0 on success or a negative errno.
 */
2339 static int hci_dev_do_open(struct hci_dev *hdev)
2343 BT_DBG("%s %p", hdev->name, hdev);
2347 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2352 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2353 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2354 /* Check for rfkill but allow the HCI setup stage to
2355 * proceed (which in itself doesn't cause any RF activity).
2357 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2362 /* Check for valid public address or a configured static
2363 * random address, but let the HCI setup proceed to
2364 * be able to determine if there is a public address
2367 * In case of user channel usage, it is not important
2368 * if a public address or static random address is
2371 * This check is only valid for BR/EDR controllers
2372 * since AMP controllers do not have an address.
2374 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2375 hdev->dev_type == HCI_BREDR &&
2376 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2377 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2378 ret = -EADDRNOTAVAIL;
2383 if (test_bit(HCI_UP, &hdev->flags)) {
2388 if (hdev->open(hdev)) {
2393 atomic_set(&hdev->cmd_cnt, 1);
2394 set_bit(HCI_INIT, &hdev->flags);
2396 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2398 ret = hdev->setup(hdev);
2400 /* The transport driver can set these quirks before
2401 * creating the HCI device or in its setup callback.
2403 * In case any of them is set, the controller has to
2404 * start up as unconfigured.
2406 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2407 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2408 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2410 /* For an unconfigured controller it is required to
2411 * read at least the version information provided by
2412 * the Read Local Version Information command.
2414 * If the set_bdaddr driver callback is provided, then
2415 * also the original Bluetooth public device address
2416 * will be read using the Read BD Address command.
2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2419 ret = __hci_unconf_init(hdev);
2422 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2423 /* If public address change is configured, ensure that
2424 * the address gets programmed. If the driver does not
2425 * support changing the public address, fail the power
2428 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2430 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2432 ret = -EADDRNOTAVAIL;
/* Full init only for configured, kernel-managed controllers */
2436 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2437 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2438 ret = __hci_init(hdev);
2441 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report powered to mgmt */
2445 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2446 set_bit(HCI_UP, &hdev->flags);
2447 hci_notify(hdev, HCI_DEV_UP);
2448 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2449 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2450 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2451 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2452 hdev->dev_type == HCI_BREDR) {
2454 mgmt_powered(hdev, 1);
2455 hci_dev_unlock(hdev);
2458 /* Init failed, cleanup */
2459 flush_work(&hdev->tx_work);
2460 flush_work(&hdev->cmd_work);
2461 flush_work(&hdev->rx_work);
2463 skb_queue_purge(&hdev->cmd_q);
2464 skb_queue_purge(&hdev->rx_q);
2469 if (hdev->sent_cmd) {
2470 kfree_skb(hdev->sent_cmd);
2471 hdev->sent_cmd = NULL;
/* Keep only the RAW flag across a failed open */
2475 hdev->flags &= BIT(HCI_RAW);
2479 hci_req_unlock(hdev);
2483 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: validate device state, cancel pending
 * power-off work, wait for setup completion, then open the device.
 * Returns 0 on success or a negative errno.
 */
2485 int hci_dev_open(__u16 dev)
2487 struct hci_dev *hdev;
2490 hdev = hci_dev_get(dev);
2494 /* Devices that are marked as unconfigured can only be powered
2495 * up as user channel. Trying to bring them up as normal devices
2496 * will result into a failure. Only user channel operation is
2499 * When this function is called for a user channel, the flag
2500 * HCI_USER_CHANNEL will be set first before attempting to
2503 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2504 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2509 /* We need to ensure that no other power on/off work is pending
2510 * before proceeding to call hci_dev_do_open. This is
2511 * particularly important if the setup procedure has not yet
2514 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2515 cancel_delayed_work(&hdev->power_off);
2517 /* After this call it is guaranteed that the setup procedure
2518 * has finished. This means that error conditions like RFKILL
2519 * or no valid public or static random address apply.
2521 flush_workqueue(hdev->req_workqueue);
2523 /* For controllers not using the management interface and that
2524 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2525 * so that pairing works for them. Once the management interface
2526 * is in use this bit will be cleared again and userspace has
2527 * to explicitly enable it.
2529 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2530 !test_bit(HCI_MGMT, &hdev->dev_flags))
2531 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2533 err = hci_dev_do_open(hdev);
2540 /* This function requires the caller holds hdev->lock */
/* Remove every LE connection parameter entry from its pending-action
 * list (auto-connect / report lists) without freeing the params.
 */
2541 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2543 struct hci_conn_params *p;
2545 list_for_each_entry(p, &hdev->le_conn_params, list)
2546 list_del_init(&p->action);
2548 BT_DBG("All LE pending actions cleared");
/* Power down a controller: cancel timers/work, flush queues and the
 * conn/inquiry state, optionally reset the hardware, notify mgmt, and
 * clear all non-persistent flags.
 */
2551 static int hci_dev_do_close(struct hci_dev *hdev)
2553 BT_DBG("%s %p", hdev->name, hdev);
2555 cancel_delayed_work(&hdev->power_off);
2557 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail out */
2560 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2561 cancel_delayed_work_sync(&hdev->cmd_timer);
2562 hci_req_unlock(hdev);
2566 /* Flush RX and TX works */
2567 flush_work(&hdev->tx_work);
2568 flush_work(&hdev->rx_work);
2570 if (hdev->discov_timeout > 0) {
2571 cancel_delayed_work(&hdev->discov_off);
2572 hdev->discov_timeout = 0;
2573 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2574 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2577 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2578 cancel_delayed_work(&hdev->service_cache);
2580 cancel_delayed_work_sync(&hdev->le_scan_disable);
2582 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2583 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Drop all cached discovery results, connections and LE actions */
2586 hci_inquiry_cache_flush(hdev);
2587 hci_conn_hash_flush(hdev);
2588 hci_pend_le_actions_clear(hdev);
2589 hci_dev_unlock(hdev);
2591 hci_notify(hdev, HCI_DEV_DOWN);
2597 skb_queue_purge(&hdev->cmd_q);
2598 atomic_set(&hdev->cmd_cnt, 1);
/* Issue a final HCI Reset if the driver's quirk requests it */
2599 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2600 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2601 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2602 set_bit(HCI_INIT, &hdev->flags);
2603 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2604 clear_bit(HCI_INIT, &hdev->flags);
2607 /* flush cmd work */
2608 flush_work(&hdev->cmd_work);
2611 skb_queue_purge(&hdev->rx_q);
2612 skb_queue_purge(&hdev->cmd_q);
2613 skb_queue_purge(&hdev->raw_q);
2615 /* Drop last sent command */
2616 if (hdev->sent_cmd) {
2617 cancel_delayed_work_sync(&hdev->cmd_timer);
2618 kfree_skb(hdev->sent_cmd);
2619 hdev->sent_cmd = NULL;
2622 kfree_skb(hdev->recv_evt);
2623 hdev->recv_evt = NULL;
2625 /* After this point our queues are empty
2626 * and no tasks are scheduled. */
/* Keep only RAW in flags and only the persistent dev_flags */
2630 hdev->flags &= BIT(HCI_RAW);
2631 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2633 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2634 if (hdev->dev_type == HCI_BREDR) {
2636 mgmt_powered(hdev, 0);
2637 hci_dev_unlock(hdev);
2641 /* Controller radio is available but is currently powered down */
2642 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2644 memset(hdev->eir, 0, sizeof(hdev->eir));
2645 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2646 bacpy(&hdev->random_addr, BDADDR_ANY);
2648 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel
 * pending auto power-off, then close the device.
 */
2654 int hci_dev_close(__u16 dev)
2656 struct hci_dev *hdev;
2659 hdev = hci_dev_get(dev);
2663 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2668 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2669 cancel_delayed_work(&hdev->power_off);
2671 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic and cached state, reset the
 * flow-control counters, and issue a synchronous HCI Reset. Fails for
 * devices that are down, user-channel owned, or unconfigured.
 */
2678 int hci_dev_reset(__u16 dev)
2680 struct hci_dev *hdev;
2683 hdev = hci_dev_get(dev);
2689 if (!test_bit(HCI_UP, &hdev->flags)) {
2694 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2699 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
/* Drop queues and caches */
2705 skb_queue_purge(&hdev->rx_q);
2706 skb_queue_purge(&hdev->cmd_q);
2709 hci_inquiry_cache_flush(hdev);
2710 hci_conn_hash_flush(hdev);
2711 hci_dev_unlock(hdev);
/* Reset the per-link-type flow-control credit counters */
2716 atomic_set(&hdev->cmd_cnt, 1);
2717 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2719 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2722 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device statistics counters. */
2727 int hci_dev_reset_stat(__u16 dev)
2729 struct hci_dev *hdev;
2732 hdev = hci_dev_get(dev);
2736 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2741 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2746 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Sync the CONNECTABLE/DISCOVERABLE dev_flags with a scan-enable value
 * set via legacy ioctl, and emit mgmt new-settings/adv updates if the
 * management interface is in use.
 */
2753 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2755 bool conn_changed, discov_changed;
2757 BT_DBG("%s scan 0x%02x", hdev->name, scan)
2759 if ((scan & SCAN_PAGE))
2760 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2763 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2766 if ((scan & SCAN_INQUIRY)) {
2767 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2770 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2771 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Without mgmt in use there is nobody to notify */
2775 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2778 if (conn_changed || discov_changed) {
2779 /* In case this was disabled through mgmt */
2780 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2782 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2783 mgmt_update_adv_data(hdev);
2785 mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU). Returns 0 or negative errno.
 */
2789 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2791 struct hci_dev *hdev;
2792 struct hci_dev_req dr;
2795 if (copy_from_user(&dr, arg, sizeof(dr)))
2798 hdev = hci_dev_get(dr.dev_id);
2802 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2807 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2812 if (hdev->dev_type != HCI_BREDR) {
2817 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2824 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2829 if (!lmp_encrypt_capable(hdev)) {
2834 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2835 /* Auth must be enabled first */
2836 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2842 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2847 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2850 /* Ensure that the connectable and discoverable states
2851 * get correctly modified as this was a non-mgmt change.
2854 hci_update_scan_state(hdev, dr.dev_opt);
2858 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2862 case HCISETLINKMODE:
2863 hdev->link_mode = ((__u16) dr.dev_opt) &
2864 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2868 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high u16 and packet count in the low u16 */
2872 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2873 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2877 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2878 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy the list of registered devices (id + flag
 * word each) to userspace, capped at the requested dev_num.
 */
2891 int hci_get_dev_list(void __user *arg)
2893 struct hci_dev *hdev;
2894 struct hci_dev_list_req *dl;
2895 struct hci_dev_req *dr;
2896 int n = 0, size, err;
2899 if (get_user(dev_num, (__u16 __user *) arg))
/* Sanity-bound the requested count before sizing the allocation */
2902 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2905 size = sizeof(*dl) + dev_num * sizeof(*dr);
2907 dl = kzalloc(size, GFP_KERNEL);
2913 read_lock(&hci_dev_list_lock);
2914 list_for_each_entry(hdev, &hci_dev_list, list) {
2915 unsigned long flags = hdev->flags;
2917 /* When the auto-off is configured it means the transport
2918 * is running, but in that case still indicate that the
2919 * device is actually down.
2921 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2922 flags &= ~BIT(HCI_UP);
2924 (dr + n)->dev_id = hdev->id;
2925 (dr + n)->dev_opt = flags;
2930 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
2933 size = sizeof(*dl) + n * sizeof(*dr);
2935 err = copy_to_user(arg, dl, size);
2938 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl helper: fill a hci_dev_info for the device id
 * requested by userspace and copy it back.
 */
int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;

	if (copy_from_user(&di, arg, sizeof(di)))

	hdev = hci_dev_get(di.dev_id);

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
		flags = hdev->flags;

	/* NOTE(review): strcpy assumes hdev->name always fits di.name —
	 * both appear to be fixed-size fields; confirm and consider a
	 * bounded copy. */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type into the low nibble and dev_type into bits 4-5. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.pkt_type = hdev->pkt_type;
	/* BR/EDR capable controllers report ACL/SCO buffer info; otherwise
	 * the LE buffer info is reported in the ACL fields. */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;

	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
2994 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: mirror the rfkill block state into HCI_RFKILLED and
 * close the device when it gets blocked (unless it is still in the
 * setup/config phase, which handles rfkill itself).
 */
static int hci_rfkill_set_block(void *data, bool blocked)
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	/* Devices given over to a user channel are not managed here. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))

	set_bit(HCI_RFKILLED, &hdev->dev_flags);
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		hci_dev_do_close(hdev);
	clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations registered for each HCI device in hci_register_dev(). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
/* Deferred power-on work (queued from hci_register_dev() and mgmt):
 * open the device, then re-check error conditions that were deliberately
 * ignored during setup and turn the device back off if they still hold.
 */
static void hci_power_on(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
		mgmt_set_powered_failed(hdev, err);

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto power-off after the timeout unless mgmt takes over. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
/* Deferred power-off work: simply close the device. */
static void hci_power_off(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
/* Delayed work that ends a time-limited discoverable period via mgmt. */
static void hci_discov_off(struct work_struct *work)
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
/* Remove and free every UUID entry queued on hdev->uuids. */
void hci_uuids_clear(struct hci_dev *hdev)
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
/* Remove and free every stored BR/EDR link key on hdev->link_keys. */
void hci_link_keys_clear(struct hci_dev *hdev)
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);
/* Remove and free every SMP long term key on hdev->long_term_keys. */
void hci_smp_ltks_clear(struct hci_dev *hdev)
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Remove and free every SMP identity resolving key on the hdev list. */
void hci_smp_irks_clear(struct hci_dev *hdev)
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
/* Look up a stored BR/EDR link key by remote address; NULL if absent. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements (auth_type/remote_auth).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
	/* Presumably the legacy (pre-SSP) key types — TODO confirm the
	 * 0x03 boundary against the HCI key type definitions. */
	if (key_type < 0x03)

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)

	/* Security mode 3 case */

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)

	/* If none of the above criteria match, then don't store the key
3192 static u8 ltk_role(u8 type)
3194 if (type == SMP_LTK)
3195 return HCI_ROLE_MASTER;
3197 return HCI_ROLE_SLAVE;
/* Look up an LTK by EDIV/Rand pair and role; entries whose type maps to
 * a different role are skipped.  NULL if no match.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)

		if (ltk_role(k->type) != role)
/* Look up an LTK by remote address, address type and role; NULL if none. */
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, u8 role)
	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_role(k->type) == role)
/* Find the IRK matching a resolvable private address.  First try the
 * cached RPA of each entry; if that fails, resolve the RPA against each
 * IRK value and cache the result on a match.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
	struct smp_irk *irk;

	/* Fast path: the RPA was resolved before and is still cached. */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))

	/* Slow path: cryptographically resolve the RPA with each IRK. */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
/* Find the IRK stored for a given identity address; NULL if none. */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
/* Store (or update) a BR/EDR link key for @bdaddr.  On return
 * *persistent (if non-NULL) says whether the key should be kept across
 * power cycles, as judged by hci_persistent_key().
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
	struct link_key *key, *old_key;

	/* Reuse an existing entry for this address when there is one;
	 * otherwise allocate a fresh one and queue it on the list. */
	old_key = hci_find_link_key(hdev, bdaddr);
		old_key_type = old_key->type;
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		list_add(&key->list, &hdev->link_keys);

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
			conn->key_type = type;

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the old key's type on record. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;

		*persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.  An
 * existing entry with the matching role is reused, otherwise a new one
 * is allocated and queued on hdev->long_term_keys.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		list_add(&key->list, &hdev->long_term_keys);

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->enc_size = enc_size;
/* Store (or update) an identity resolving key for @bdaddr/@addr_type,
 * together with the last known RPA it resolved.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
	struct smp_irk *irk;

	/* Reuse an existing entry for this identity address if present. */
	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;
		list_add(&irk->list, &hdev->identity_resolving_keys);

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr, if any. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
/* Delete all LTKs matching @bdaddr/@bdaddr_type.  Returns 0 when at
 * least one entry was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	return removed ? 0 : -ENOENT;
/* Delete all IRKs matching @bdaddr/@addr_type. */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
/* HCI command timer function: fires when the controller fails to answer
 * a command in time.  Logs the stuck opcode (if the sent command is
 * still around) and restarts command processing.
 */
static void hci_cmd_timeout(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
		BT_ERR("%s command tx timeout", hdev->name);

	/* Allow one command again and kick the cmd work queue. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by address; NULL if absent. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete stored remote OOB data for @bdaddr, if any. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
/* Remove and free every remote OOB data entry on the hdev list. */
void hci_remote_oob_data_clear(struct hci_dev *hdev)
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
/* Store legacy (P-192 only) remote OOB data for @bdaddr, reusing an
 * existing entry when present.  The P-256 fields are zeroed to mark
 * them unset.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* No P-256 values in the legacy variant: clear them explicitly. */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Store extended remote OOB data (both P-192 and P-256 hash/randomizer
 * pairs) for @bdaddr, reusing an existing entry when present.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a bdaddr_list entry matching address and type; NULL if absent. */
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every entry on a bdaddr list. */
void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to a bdaddr list.  BDADDR_ANY is rejected, and a
 * duplicate entry is not added twice.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))

	if (hci_bdaddr_list_lookup(list, bdaddr, type))

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);
/* Remove @bdaddr/@type from a bdaddr list.  Passing BDADDR_ANY clears
 * the whole list instead.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);

	list_del(&entry->list);
/* This function requires the caller holds hdev->lock.
 *
 * Look up the LE connection parameters stored for an identity address;
 * NULL if the address is not an identity address or has no entry.
 */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
/* Return whether an LE connection to @addr/@type is currently in
 * BT_CONNECTED state.
 */
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);

	if (conn->dst_type != type)

	if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock.
 *
 * Look up a conn_params entry on a pending-action list (pend_le_conns /
 * pend_le_reports) by identity address; note the iteration is over the
 * 'action' list member, not 'list'.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
/* This function requires the caller holds hdev->lock.
 *
 * Get or create the LE connection parameters entry for an identity
 * address; a new entry is initialized from the hdev-wide defaults with
 * auto-connect disabled.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))

	params = hci_conn_params_lookup(hdev, addr, addr_type);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
		BT_ERR("Out of memory");

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed with the controller-wide default connection parameters. */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for @addr/@addr_type, creating the
 * conn_params entry if needed, and requeue it on the matching pending
 * list (reports or conns) before refreshing the background scan.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);

	/* Nothing to do if the policy is unchanged. */
	if (params->auto_connect == auto_connect)

	/* Drop from any pending list before re-filing below. */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection when not connected yet. */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* This function requires the caller holds hdev->lock.
 *
 * Remove the conn_params entry for @addr/@addr_type (from both the
 * pending-action and main lists) and refresh the background scan.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);

	list_del(&params->action);
	list_del(&params->list);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* This function requires the caller holds hdev->lock.
 *
 * Drop every conn_params entry whose auto-connect policy is DISABLED.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
		list_del(&params->list);

	BT_DBG("All LE disabled connection parameters were removed");
/* This function requires the caller holds hdev->lock.
 *
 * Drop every conn_params entry and refresh the background scan.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->action);
		list_del(&params->list);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the inquiry started in
 * le_scan_disable_work_complete(); resets discovery state on failure.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status)
		BT_ERR("Failed to start inquiry: status %d", status);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
/* Completion handler after LE scanning was disabled: for pure LE
 * discovery the session ends here; for interleaved discovery a BR/EDR
 * inquiry is issued next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;

		BT_ERR("Failed to disable LE scanning: status %d", status);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);

	case DISCOV_TYPE_INTERLEAVED:
		/* Follow up with a BR/EDR inquiry using the GIAC LAP. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		hci_dev_unlock(hdev);
/* Delayed work that turns LE scanning off; completion continues in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
		BT_ERR("Disable LE scanning request failed: err %d", err);
/* Queue an LE Set Random Address command onto @req, unless advertising
 * or an LE connection attempt makes the change unsafe right now.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Pick the own-address type for an LE operation and, when needed, queue
 * the command that programs the controller's random address.  The
 * chosen type is returned through *own_addr_type.  Priority order:
 * RPA (privacy enabled) > unresolvable address (privacy required) >
 * static address (forced or no public address) > public address.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
	struct hci_dev *hdev = req->hdev;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Keep the current RPA if it hasn't expired and is still
		 * the programmed random address. */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
			BT_ERR("%s failed to generate new RPA", hdev->name);

		set_random_addr(req, &hdev->rpa);

		/* Schedule expiry of the freshly generated RPA. */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

	/* Neither privacy nor static address is being used so use a
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: default parameters, locks,
 * lists, work items and queues.  The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);

	/* Default BR/EDR packet types, IO capability and identifiers. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE advertising, scanning and connection parameters
	 * (values in controller units). */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
	/* will free via device release */
	put_device(&hdev->dev);
EXPORT_SYMBOL(hci_free_dev);
4049 /* Register HCI device */
4050 int hci_register_dev(struct hci_dev *hdev)
4054 if (!hdev->open || !hdev->close || !hdev->send)
4057 /* Do not allow HCI_AMP devices to register at index 0,
4058 * so the index can be used as the AMP controller ID.
4060 switch (hdev->dev_type) {
4062 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4065 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4074 sprintf(hdev->name, "hci%d", id);
4077 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4079 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4080 WQ_MEM_RECLAIM, 1, hdev->name);
4081 if (!hdev->workqueue) {
4086 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4087 WQ_MEM_RECLAIM, 1, hdev->name);
4088 if (!hdev->req_workqueue) {
4089 destroy_workqueue(hdev->workqueue);
4094 if (!IS_ERR_OR_NULL(bt_debugfs))
4095 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4097 dev_set_name(&hdev->dev, "%s", hdev->name);
4099 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4101 if (IS_ERR(hdev->tfm_aes)) {
4102 BT_ERR("Unable to create crypto context");
4103 error = PTR_ERR(hdev->tfm_aes);
4104 hdev->tfm_aes = NULL;
4108 error = device_add(&hdev->dev);
4112 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4113 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4116 if (rfkill_register(hdev->rfkill) < 0) {
4117 rfkill_destroy(hdev->rfkill);
4118 hdev->rfkill = NULL;
4122 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4123 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4125 set_bit(HCI_SETUP, &hdev->dev_flags);
4126 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4128 if (hdev->dev_type == HCI_BREDR) {
4129 /* Assume BR/EDR support until proven otherwise (such as
4130 * through reading supported features during init.
4132 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4135 write_lock(&hci_dev_list_lock);
4136 list_add(&hdev->list, &hci_dev_list);
4137 write_unlock(&hci_dev_list_lock);
4139 /* Devices that are marked for raw-only usage are unconfigured
4140 * and should not be included in normal operation.
4142 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4143 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4145 hci_notify(hdev, HCI_DEV_REG);
4148 queue_work(hdev->req_workqueue, &hdev->power_on);
4153 crypto_free_blkcipher(hdev->tfm_aes);
4155 destroy_workqueue(hdev->workqueue);
4156 destroy_workqueue(hdev->req_workqueue);
4158 ida_simple_remove(&hci_index_ida, hdev->id);
4162 EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
/* Tear down everything hci_register_dev() set up: close the device,
 * notify mgmt, unregister rfkill/sysfs/debugfs, free the stored keys
 * and lists, and release the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt when the device left the
	 * init/setup/config phases. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);

	/* mgmt_index_removed should take care of emptying the
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	rfkill_unregister(hdev->rfkill);
	rfkill_destroy(hdev->rfkill);

	crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Free all persisted pairing and device-list state. */
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, id);
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
	/* Notify registered listeners of the suspend transition. */
	hci_notify(hdev, HCI_DEV_SUSPEND);
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
	/* Notify registered listeners of the resume transition. */
	hci_notify(hdev, HCI_DEV_RESUME);
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
/* Timestamp the skb, mark it incoming and queue it for the rx worker.
 * Frames arriving while the device is neither up nor initializing are
 * rejected.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);
EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble one HCI packet of @type from a driver byte
 * stream.  Per-slot state lives in hdev->reassembly[index]; once the
 * expected payload is complete the frame is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slots. */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)

	skb = hdev->reassembly[index];

	/* Pick maximum frame and header sizes per packet type. */
	case HCI_ACLDATA_PKT:
		len = HCI_MAX_FRAME_SIZE;
		hlen = HCI_ACL_HDR_SIZE;
		len = HCI_MAX_EVENT_SIZE;
		hlen = HCI_EVENT_HDR_SIZE;
	case HCI_SCODATA_PKT:
		len = HCI_MAX_SCO_SIZE;
		hlen = HCI_SCO_HDR_SIZE;

	/* First fragment: allocate a fresh skb for this slot. */
	skb = bt_skb_alloc(len, GFP_ATOMIC);

	scb = (void *) skb->cb;
	scb->pkt_type = type;

	hdev->reassembly[index] = skb;

	/* Append as much of the new data as the frame still expects. */
	scb = (void *) skb->cb;
	len = min_t(uint, scb->expect, count);

	memcpy(skb_put(skb, len), data, len);

	/* Once the packet header is complete, read the payload length;
	 * abort (and drop the slot) when it cannot fit the skb. */
	if (skb->len == HCI_EVENT_HDR_SIZE) {
		struct hci_event_hdr *h = hci_event_hdr(skb);
		scb->expect = h->plen;

		if (skb_tailroom(skb) < scb->expect) {
			hdev->reassembly[index] = NULL;

	case HCI_ACLDATA_PKT:
		if (skb->len == HCI_ACL_HDR_SIZE) {
			struct hci_acl_hdr *h = hci_acl_hdr(skb);
			scb->expect = __le16_to_cpu(h->dlen);

			if (skb_tailroom(skb) < scb->expect) {
				hdev->reassembly[index] = NULL;

	case HCI_SCODATA_PKT:
		if (skb->len == HCI_SCO_HDR_SIZE) {
			struct hci_sco_hdr *h = hci_sco_hdr(skb);
			scb->expect = h->dlen;

			if (skb_tailroom(skb) < scb->expect) {
				hdev->reassembly[index] = NULL;

	if (scb->expect == 0) {
		/* Complete frame */

		bt_cb(skb)->pkt_type = type;
		hci_recv_frame(hdev, skb);

		hdev->reassembly[index] = NULL;
/* Feed a typed fragment into the per-type reassembly slot (type - 1)
 * until all @count bytes are consumed.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)

		rem = hci_reassembly(hdev, type, data, count, type - 1);

		/* Advance past the bytes already consumed. */
		data += (count - rem);
EXPORT_SYMBOL(hci_recv_fragment);
4399 #define STREAM_REASSEMBLY 0
/* Feed an untyped byte stream into reassembly: the first byte of each
 * frame carries the packet type, the rest goes through hci_reassembly()
 * using the single STREAM_REASSEMBLY slot.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
	struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		struct { char type; } *pkt;

		/* Start of the frame */

		/* Continuation: reuse the type recorded on the skb. */
		type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,

		data += (count - rem);
EXPORT_SYMBOL(hci_recv_stream_fragment);
4434 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback on the global hci_cb_list,
 * taking the writer side of hci_cb_list_lock.
 */
4436 int hci_register_cb(struct hci_cb *cb)
4438 BT_DBG("%p name %s", cb, cb->name);
4440 write_lock(&hci_cb_list_lock);
4441 list_add(&cb->list, &hci_cb_list);
4442 write_unlock(&hci_cb_list_lock);
4446 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback from the
 * global hci_cb_list under the writer lock.
 */
4448 int hci_unregister_cb(struct hci_cb *cb)
4450 BT_DBG("%p name %s", cb, cb->name);
4452 write_lock(&hci_cb_list_lock);
4453 list_del(&cb->list);
4454 write_unlock(&hci_cb_list_lock);
4458 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing skb to the driver: timestamp it, mirror a copy to
 * the monitor channel (and to raw sockets when promiscuous mode is
 * on), then call the driver's send hook.
 */
4460 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4464 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4467 __net_timestamp(skb);
4469 /* Send copy to monitor */
4470 hci_send_to_monitor(hdev, skb);
4472 if (atomic_read(&hdev->promisc)) {
4473 /* Send copy to the sockets */
4474 hci_send_to_sock(hdev, skb);
4477 /* Get rid of skb owner, prior to sending to the driver. */
4480 err = hdev->send(hdev, skb);
4482 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* Initialise an HCI request: set up its private command queue.
 * NOTE(review): the req->hdev assignment is in an elided line.
 */
4487 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4489 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: attach the completion callback to the last
 * queued command, splice the request's commands onto hdev->cmd_q under
 * the queue lock, and kick the command worker.
 */
4494 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4496 struct hci_dev *hdev = req->hdev;
4497 struct sk_buff *skb;
4498 unsigned long flags;
4500 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4502 /* If an error occurred during request building, remove all HCI
4503 * commands queued on the HCI request queue.
4506 skb_queue_purge(&req->cmd_q);
4510 /* Do not allow empty requests */
4511 if (skb_queue_empty(&req->cmd_q))
4514 skb = skb_peek_tail(&req->cmd_q);
4515 bt_cb(skb)->req.complete = complete;
4517 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4518 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4519 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4521 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Return true while a synchronous HCI request is awaiting completion. */
4526 bool hci_req_pending(struct hci_dev *hdev)
4528 return (hdev->req_status == HCI_REQ_PEND);
/* Allocate and fill an skb holding one HCI command: header (opcode in
 * little-endian) followed by plen parameter bytes, tagged as
 * HCI_COMMAND_PKT.  GFP_ATOMIC because callers may hold spinlocks.
 */
4531 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4532 u32 plen, const void *param)
4534 int len = HCI_COMMAND_HDR_SIZE + plen;
4535 struct hci_command_hdr *hdr;
4536 struct sk_buff *skb;
4538 skb = bt_skb_alloc(len, GFP_ATOMIC);
4542 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4543 hdr->opcode = cpu_to_le16(opcode);
4547 memcpy(skb_put(skb, plen), param, plen);
4549 BT_DBG("skb len %d", skb->len);
4551 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4556 /* Send HCI command */
/* Build a single stand-alone command, mark it as the start of a
 * one-command request, queue it on hdev->cmd_q and wake the command
 * worker.
 */
4557 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4560 struct sk_buff *skb;
4562 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4564 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4566 BT_ERR("%s no memory for command", hdev->name);
4570 /* Stand-alone HCI commands must be flagged as
4571 * single-command requests.
4573 bt_cb(skb)->req.start = true;
4575 skb_queue_tail(&hdev->cmd_q, skb);
4576 queue_work(hdev->workqueue, &hdev->cmd_work);
4581 /* Queue a command to an asynchronous HCI request */
/* Append one command to a request under construction.  The first
 * command queued is flagged as the request start; "event" records a
 * non-standard completion event to wait for (0 = command complete).
 */
4582 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4583 const void *param, u8 event)
4585 struct hci_dev *hdev = req->hdev;
4586 struct sk_buff *skb;
4588 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4590 /* If an error occurred during request building, there is no point in
4591 * queueing the HCI command. We can simply return.
4596 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4598 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4599 hdev->name, opcode);
4604 if (skb_queue_empty(&req->cmd_q))
4605 bt_cb(skb)->req.start = true;
4607 bt_cb(skb)->req.event = event;
4609 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command that completes with the normal
 * Command Complete/Status event (event = 0).
 */
4612 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4615 hci_req_add_ev(req, opcode, plen, param, 0);
4618 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of hdev->sent_cmd if it
 * matches the given opcode; NULL-ish paths (no sent_cmd, opcode
 * mismatch) are in the elided lines.
 */
4619 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4621 struct hci_command_hdr *hdr;
4623 if (!hdev->sent_cmd)
4626 hdr = (void *) hdev->sent_cmd->data;
4628 if (hdr->opcode != cpu_to_le16(opcode))
4631 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4633 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to skb: handle+flags packed into a
 * little-endian handle field, plus the data length.
 */
4637 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4639 struct hci_acl_hdr *hdr;
4642 skb_push(skb, HCI_ACL_HDR_SIZE);
4643 skb_reset_transport_header(skb);
4644 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4645 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4646 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to an outgoing skb (choosing the connection or
 * channel handle depending on hdev->dev_type) and queue it.  A
 * fragmented skb (non-NULL frag_list) is queued fragment-by-fragment
 * atomically under the queue lock, with ACL_START cleared on
 * continuation fragments.
 */
4649 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4650 struct sk_buff *skb, __u16 flags)
4652 struct hci_conn *conn = chan->conn;
4653 struct hci_dev *hdev = conn->hdev;
4654 struct sk_buff *list;
4656 skb->len = skb_headlen(skb);
4659 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4661 switch (hdev->dev_type) {
4663 hci_add_acl_hdr(skb, conn->handle, flags);
4666 hci_add_acl_hdr(skb, chan->handle, flags);
4669 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4673 list = skb_shinfo(skb)->frag_list;
4675 /* Non fragmented */
4676 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4678 skb_queue_tail(queue, skb);
4681 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4683 skb_shinfo(skb)->frag_list = NULL;
4685 /* Queue all fragments atomically */
4686 spin_lock(&queue->lock);
4688 __skb_queue_tail(queue, skb);
4690 flags &= ~ACL_START;
4693 skb = list; list = list->next;
4695 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4696 hci_add_acl_hdr(skb, conn->handle, flags);
4698 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4700 __skb_queue_tail(queue, skb);
4703 spin_unlock(&queue->lock);
/* Queue ACL data on the channel's data queue and schedule the TX
 * worker to drain it.
 */
4707 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4709 struct hci_dev *hdev = chan->conn->hdev;
4711 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4713 hci_queue_acl(chan, &chan->data_q, skb, flags);
4715 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle little-endian, dlen as-is — SCO dlen
 * is a single byte) and queue the packet for the TX worker.
 */
4719 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4721 struct hci_dev *hdev = conn->hdev;
4722 struct hci_sco_hdr hdr;
4724 BT_DBG("%s len %d", hdev->name, skb->len);
4726 hdr.handle = cpu_to_le16(conn->handle);
4727 hdr.dlen = skb->len;
4729 skb_push(skb, HCI_SCO_HDR_SIZE);
4730 skb_reset_transport_header(skb);
4731 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4733 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4735 skb_queue_tail(&conn->data_q, skb);
4736 queue_work(hdev->workqueue, &hdev->tx_work);
4739 /* ---- HCI TX task (outgoing data) ---- */
4741 /* HCI Connection scheduler */
/* Pick the connection of the given link type that has queued data and
 * the fewest in-flight packets (fairness), and compute a TX quota from
 * the controller's per-type buffer credits.  Iterates the connection
 * hash under RCU.
 */
4742 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4745 struct hci_conn_hash *h = &hdev->conn_hash;
4746 struct hci_conn *conn = NULL, *c;
4747 unsigned int num = 0, min = ~0;
4749 /* We don't have to lock device here. Connections are always
4750 * added and removed with TX task disabled. */
4754 list_for_each_entry_rcu(c, &h->list, list) {
4755 if (c->type != type || skb_queue_empty(&c->data_q))
4758 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4763 if (c->sent < min) {
4768 if (hci_conn_num(hdev, type) == num)
4777 switch (conn->type) {
4779 cnt = hdev->acl_cnt;
4783 cnt = hdev->sco_cnt;
/* LE falls back to ACL credits when the controller has no
 * dedicated LE buffers (le_mtu == 0). */
4786 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4790 BT_ERR("Unknown link type");
4798 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of the given type
 * that still has unacknowledged packets (c->sent).
 */
4802 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4804 struct hci_conn_hash *h = &hdev->conn_hash;
4807 BT_ERR("%s link tx timeout", hdev->name);
4811 /* Kill stalled connections */
4812 list_for_each_entry_rcu(c, &h->list, list) {
4813 if (c->type == type && c->sent) {
4814 BT_ERR("%s killing stalled connection %pMR",
4815 hdev->name, &c->dst);
4816 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of connections of the
 * given type, prefer the highest head-of-queue skb priority, breaking
 * ties by fewest packets in flight on the owning connection; then
 * derive the TX quota from the matching controller credit pool.
 */
4823 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4826 struct hci_conn_hash *h = &hdev->conn_hash;
4827 struct hci_chan *chan = NULL;
4828 unsigned int num = 0, min = ~0, cur_prio = 0;
4829 struct hci_conn *conn;
4830 int cnt, q, conn_num = 0;
4832 BT_DBG("%s", hdev->name);
4836 list_for_each_entry_rcu(conn, &h->list, list) {
4837 struct hci_chan *tmp;
4839 if (conn->type != type)
4842 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4847 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4848 struct sk_buff *skb;
4850 if (skb_queue_empty(&tmp->data_q))
4853 skb = skb_peek(&tmp->data_q);
4854 if (skb->priority < cur_prio)
4857 if (skb->priority > cur_prio) {
4860 cur_prio = skb->priority;
4865 if (conn->sent < min) {
4871 if (hci_conn_num(hdev, type) == conn_num)
4880 switch (chan->conn->type) {
4882 cnt = hdev->acl_cnt;
4885 cnt = hdev->block_cnt;
4889 cnt = hdev->sco_cnt;
4892 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4896 BT_ERR("Unknown link type");
4901 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a TX round, promote the head skb of
 * waiting channels toward HCI_PRIO_MAX - 1 so lower-priority traffic
 * eventually gets scheduled.
 */
4905 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4907 struct hci_conn_hash *h = &hdev->conn_hash;
4908 struct hci_conn *conn;
4911 BT_DBG("%s", hdev->name);
4915 list_for_each_entry_rcu(conn, &h->list, list) {
4916 struct hci_chan *chan;
4918 if (conn->type != type)
4921 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4926 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4927 struct sk_buff *skb;
4934 if (skb_queue_empty(&chan->data_q))
4937 skb = skb_peek(&chan->data_q);
4938 if (skb->priority >= HCI_PRIO_MAX - 1)
4941 skb->priority = HCI_PRIO_MAX - 1;
4943 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4947 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this ACL packet occupies (payload
 * rounded up to block_len); used by block-based flow control.
 */
4955 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4957 /* Calculate count of blocks used by this packet */
4958 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Trigger the ACL TX timeout handler when credits are exhausted and
 * nothing has been sent for HCI_ACL_TX_TIMEOUT.  Skipped for
 * unconfigured controllers.
 */
4961 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4963 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4964 /* ACL tx timeout must be longer than maximum
4965 * link supervision timeout (40.9 seconds) */
4966 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4967 HCI_ACL_TX_TIMEOUT))
4968 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: drain channels chosen by
 * hci_chan_sent() while credits remain, stopping a channel when its
 * head priority drops; recalculate priorities if anything was sent.
 * NOTE(review): '"e' in the call below is mis-encoded '&quote' —
 * verify against upstream.
 */
4972 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4974 unsigned int cnt = hdev->acl_cnt;
4975 struct hci_chan *chan;
4976 struct sk_buff *skb;
4979 __check_timeout(hdev, cnt);
4981 while (hdev->acl_cnt &&
4982 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4983 u32 priority = (skb_peek(&chan->data_q))->priority;
4984 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4985 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4986 skb->len, skb->priority);
4988 /* Stop if priority has changed */
4989 if (skb->priority < priority)
4992 skb = skb_dequeue(&chan->data_q);
4994 hci_conn_enter_active_mode(chan->conn,
4995 bt_cb(skb)->force_active);
4997 hci_send_frame(hdev, skb);
4998 hdev->acl_last_tx = jiffies;
5006 if (cnt != hdev->acl_cnt)
5007 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets).  A packet needing more blocks than remain is
 * not sent; sent/credit accounting is done in blocks.
 */
5010 static void hci_sched_acl_blk(struct hci_dev *hdev)
5012 unsigned int cnt = hdev->block_cnt;
5013 struct hci_chan *chan;
5014 struct sk_buff *skb;
5018 __check_timeout(hdev, cnt);
5020 BT_DBG("%s", hdev->name);
/* AMP controllers schedule AMP_LINK here instead of ACL_LINK. */
5022 if (hdev->dev_type == HCI_AMP)
5027 while (hdev->block_cnt > 0 &&
5028 (chan = hci_chan_sent(hdev, type, "e))) {
5029 u32 priority = (skb_peek(&chan->data_q))->priority;
5030 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5033 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5034 skb->len, skb->priority);
5036 /* Stop if priority has changed */
5037 if (skb->priority < priority)
5040 skb = skb_dequeue(&chan->data_q);
5042 blocks = __get_blocks(hdev, skb);
5043 if (blocks > hdev->block_cnt)
5046 hci_conn_enter_active_mode(chan->conn,
5047 bt_cb(skb)->force_active);
5049 hci_send_frame(hdev, skb);
5050 hdev->acl_last_tx = jiffies;
5052 hdev->block_cnt -= blocks;
5055 chan->sent += blocks;
5056 chan->conn->sent += blocks;
5060 if (cnt != hdev->block_cnt)
5061 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow-control mode, skipping when no
 * links of the relevant type exist.
 */
5064 static void hci_sched_acl(struct hci_dev *hdev)
5066 BT_DBG("%s", hdev->name);
5068 /* No ACL link over BR/EDR controller */
5069 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5072 /* No AMP link over AMP controller */
5073 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5076 switch (hdev->flow_ctl_mode) {
5077 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5078 hci_sched_acl_pkt(hdev);
5081 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5082 hci_sched_acl_blk(hdev);
/* Drain SCO connections round-robin within their quota while SCO
 * credits remain.
 */
5088 static void hci_sched_sco(struct hci_dev *hdev)
5090 struct hci_conn *conn;
5091 struct sk_buff *skb;
5094 BT_DBG("%s", hdev->name);
5096 if (!hci_conn_num(hdev, SCO_LINK))
5099 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
5100 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5101 BT_DBG("skb %p len %d", skb, skb->len);
5102 hci_send_frame(hdev, skb);
/* ~0 sentinel — reset handling is in the elided lines. */
5105 if (conn->sent == ~0)
/* Same as hci_sched_sco() but for eSCO links; shares the SCO credit
 * counter (sco_cnt).
 */
5111 static void hci_sched_esco(struct hci_dev *hdev)
5113 struct hci_conn *conn;
5114 struct sk_buff *skb;
5117 BT_DBG("%s", hdev->name);
5119 if (!hci_conn_num(hdev, ESCO_LINK))
5122 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5124 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5125 BT_DBG("skb %p len %d", skb, skb->len);
5126 hci_send_frame(hdev, skb);
5129 if (conn->sent == ~0)
/* LE scheduler: uses dedicated LE credits when the controller
 * advertises them (le_pkts), otherwise borrows the ACL pool — in that
 * case the remainder is written back to acl_cnt at the end.
 */
5135 static void hci_sched_le(struct hci_dev *hdev)
5137 struct hci_chan *chan;
5138 struct sk_buff *skb;
5139 int quote, cnt, tmp;
5141 BT_DBG("%s", hdev->name);
5143 if (!hci_conn_num(hdev, LE_LINK))
5146 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5147 /* LE tx timeout must be longer than maximum
5148 * link supervision timeout (40.9 seconds) */
5149 if (!hdev->le_cnt && hdev->le_pkts &&
5150 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5151 hci_link_tx_to(hdev, LE_LINK);
5154 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5156 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
5157 u32 priority = (skb_peek(&chan->data_q))->priority;
5158 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5159 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5160 skb->len, skb->priority);
5162 /* Stop if priority has changed */
5163 if (skb->priority < priority)
5166 skb = skb_dequeue(&chan->data_q);
5168 hci_send_frame(hdev, skb);
5169 hdev->le_last_tx = jiffies;
5180 hdev->acl_cnt = cnt;
5183 hci_prio_recalculate(hdev, LE_LINK);
/* TX workqueue entry point: run the per-link-type schedulers (unless
 * the device is in user-channel mode, where raw access bypasses them)
 * and then flush any raw packets queued on raw_q.
 */
5186 static void hci_tx_work(struct work_struct *work)
5188 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5189 struct sk_buff *skb;
5191 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5192 hdev->sco_cnt, hdev->le_cnt);
5194 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5195 /* Schedule queues and send stuff to HCI driver */
5196 hci_sched_acl(hdev);
5197 hci_sched_sco(hdev);
5198 hci_sched_esco(hdev);
5202 /* Send next queued raw (unknown type) packet */
5203 while ((skb = skb_dequeue(&hdev->raw_q)))
5204 hci_send_frame(hdev, skb);
5207 /* ----- HCI RX task (incoming data processing) ----- */
5209 /* ACL data packet */
/* RX path for one ACL packet: strip the HCI header, split the packed
 * handle field into handle + flags, look the connection up under the
 * device lock, and deliver the payload to L2CAP.
 */
5210 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5212 struct hci_acl_hdr *hdr = (void *) skb->data;
5213 struct hci_conn *conn;
5214 __u16 handle, flags;
5216 skb_pull(skb, HCI_ACL_HDR_SIZE);
5218 handle = __le16_to_cpu(hdr->handle);
5219 flags = hci_flags(handle);
5220 handle = hci_handle(handle);
5222 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5225 hdev->stat.acl_rx++;
5228 conn = hci_conn_hash_lookup_handle(hdev, handle);
5229 hci_dev_unlock(hdev);
5232 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5234 /* Send to upper protocol */
5235 l2cap_recv_acldata(conn, skb, flags);
5238 BT_ERR("%s ACL packet for unknown connection handle %d",
5239 hdev->name, handle);
5246 /* SCO data packet */
/* RX path for one SCO packet: strip the header, resolve the
 * connection under the device lock, and hand the payload to the SCO
 * layer.
 */
5246 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5248 struct hci_sco_hdr *hdr = (void *) skb->data;
5249 struct hci_conn *conn;
5252 skb_pull(skb, HCI_SCO_HDR_SIZE);
5254 handle = __le16_to_cpu(hdr->handle);
5256 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5258 hdev->stat.sco_rx++;
5261 conn = hci_conn_hash_lookup_handle(hdev, handle);
5262 hci_dev_unlock(hdev);
5265 /* Send to upper protocol */
5266 sco_recv_scodata(conn, skb);
5269 BT_ERR("%s SCO packet for unknown connection handle %d",
5270 hdev->name, handle);
/* A request is complete when the next queued command is flagged as the
 * start of a new request (or the queue is empty — elided path).
 */
5276 static bool hci_req_is_complete(struct hci_dev *hdev)
5278 struct sk_buff *skb;
5280 skb = skb_peek(&hdev->cmd_q);
5284 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q —
 * used when a spontaneous controller reset ate the pending command.
 * HCI_OP_RESET itself is never resent.
 */
5287 static void hci_resend_last(struct hci_dev *hdev)
5289 struct hci_command_hdr *sent;
5290 struct sk_buff *skb;
5293 if (!hdev->sent_cmd)
5296 sent = (void *) hdev->sent_cmd->data;
5297 opcode = __le16_to_cpu(sent->opcode);
5298 if (opcode == HCI_OP_RESET)
5301 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5305 skb_queue_head(&hdev->cmd_q, skb);
5306 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether its whole request is
 * done, find the request's completion callback (on sent_cmd for the
 * last command, otherwise while purging the remaining queued commands
 * of a failed request), and invoke it once.
 */
5309 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5311 hci_req_complete_t req_complete = NULL;
5312 struct sk_buff *skb;
5313 unsigned long flags;
5315 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5317 /* If the completed command doesn't match the last one that was
5318 * sent we need to do special handling of it.
5320 if (!hci_sent_cmd_data(hdev, opcode)) {
5321 /* Some CSR based controllers generate a spontaneous
5322 * reset complete event during init and any pending
5323 * command will never be completed. In such a case we
5324 * need to resend whatever was the last sent
5327 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5328 hci_resend_last(hdev);
5333 /* If the command succeeded and there's still more commands in
5334 * this request the request is not yet complete.
5336 if (!status && !hci_req_is_complete(hdev))
5339 /* If this was the last command in a request the complete
5340 * callback would be found in hdev->sent_cmd instead of the
5341 * command queue (hdev->cmd_q).
5343 if (hdev->sent_cmd) {
5344 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5347 /* We must set the complete callback to NULL to
5348 * avoid calling the callback more than once if
5349 * this function gets called again.
5351 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5357 /* Remove all pending commands belonging to this request */
5358 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5359 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Stop at the first command of the NEXT request; put it back. */
5360 if (bt_cb(skb)->req.start) {
5361 __skb_queue_head(&hdev->cmd_q, skb);
5365 req_complete = bt_cb(skb)->req.complete;
5368 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5372 req_complete(hdev, status);
/* RX workqueue entry point: drain rx_q, mirroring each packet to the
 * monitor (and raw sockets in promiscuous mode), dropping work in
 * user-channel mode, holding back data packets during init, and
 * finally dispatching by packet type.
 */
5375 static void hci_rx_work(struct work_struct *work)
5377 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5378 struct sk_buff *skb;
5380 BT_DBG("%s", hdev->name);
5382 while ((skb = skb_dequeue(&hdev->rx_q))) {
5383 /* Send copy to monitor */
5384 hci_send_to_monitor(hdev, skb);
5386 if (atomic_read(&hdev->promisc)) {
5387 /* Send copy to the sockets */
5388 hci_send_to_sock(hdev, skb);
5391 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5396 if (test_bit(HCI_INIT, &hdev->flags)) {
5397 /* Don't process data packets in these states. */
5398 switch (bt_cb(skb)->pkt_type) {
5399 case HCI_ACLDATA_PKT:
5400 case HCI_SCODATA_PKT:
5407 switch (bt_cb(skb)->pkt_type) {
5409 BT_DBG("%s Event packet", hdev->name);
5410 hci_event_packet(hdev, skb);
5413 case HCI_ACLDATA_PKT:
5414 BT_DBG("%s ACL data packet", hdev->name);
5415 hci_acldata_packet(hdev, skb);
5418 case HCI_SCODATA_PKT:
5419 BT_DBG("%s SCO data packet", hdev->name);
5420 hci_scodata_packet(hdev, skb);
/* Command workqueue entry point: if a credit is available, dequeue the
 * next command, keep a clone in hdev->sent_cmd for completion
 * matching, send it, and arm the command timeout (skipped while a
 * reset is pending).  On clone failure the command is re-queued and
 * the work rescheduled.
 */
5430 static void hci_cmd_work(struct work_struct *work)
5432 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5433 struct sk_buff *skb;
5435 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5436 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5438 /* Send queued commands */
5439 if (atomic_read(&hdev->cmd_cnt)) {
5440 skb = skb_dequeue(&hdev->cmd_q);
5444 kfree_skb(hdev->sent_cmd);
5446 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5447 if (hdev->sent_cmd) {
5448 atomic_dec(&hdev->cmd_cnt);
5449 hci_send_frame(hdev, skb);
5450 if (test_bit(HCI_RESET, &hdev->flags))
5451 cancel_delayed_work(&hdev->cmd_timer);
5453 schedule_delayed_work(&hdev->cmd_timer,
5456 skb_queue_head(&hdev->cmd_q, skb);
5457 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Append an LE Set Scan Enable command that disables scanning to the
 * given request.
 */
5462 void hci_req_add_le_scan_disable(struct hci_request *req)
5464 struct hci_cp_le_set_scan_enable cp;
5466 memset(&cp, 0, sizeof(cp));
5467 cp.enable = LE_SCAN_DISABLE;
5468 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Queue an LE Add Device To White List command for the given
 * connection parameters.
 * NOTE(review): '¶ms' below is mis-encoded '&params' — verify
 * against upstream before relying on this text.
 */
5471 static void add_to_white_list(struct hci_request *req,
5472 struct hci_conn_params *params)
5474 struct hci_cp_le_add_to_white_list cp;
5476 cp.bdaddr_type = params->addr_type;
5477 bacpy(&cp.bdaddr, ¶ms->addr);
5479 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
/* Synchronise the controller's LE white list with the pending
 * connection and pending report lists, then return the scan filter
 * policy to use.  Falls back to accept-all filtering when the list
 * would overflow or an entry uses an RPA (which white lists cannot
 * match).
 * NOTE(review): '¶ms' occurrences below are mis-encoded '&params'.
 */
5482 static u8 update_white_list(struct hci_request *req)
5484 struct hci_dev *hdev = req->hdev;
5485 struct hci_conn_params *params;
5486 struct bdaddr_list *b;
5487 uint8_t white_list_entries = 0;
5489 /* Go through the current white list programmed into the
5490 * controller one by one and check if that address is still
5491 * in the list of pending connections or list of devices to
5492 * report. If not present in either list, then queue the
5493 * command to remove it from the controller.
5495 list_for_each_entry(b, &hdev->le_white_list, list) {
5496 struct hci_cp_le_del_from_white_list cp;
5498 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5499 &b->bdaddr, b->bdaddr_type) ||
5500 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5501 &b->bdaddr, b->bdaddr_type)) {
5502 white_list_entries++;
5506 cp.bdaddr_type = b->bdaddr_type;
5507 bacpy(&cp.bdaddr, &b->bdaddr);
5509 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5513 /* Since all no longer valid white list entries have been
5514 * removed, walk through the list of pending connections
5515 * and ensure that any new device gets programmed into
5518 * If the list of the devices is larger than the list of
5519 * available white list entries in the controller, then
5520 * just abort and return filter policy value to not use the
5523 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5524 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5525 ¶ms->addr, params->addr_type))
5528 if (white_list_entries >= hdev->le_white_list_size) {
5529 /* Select filter policy to accept all advertising */
5533 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
5534 params->addr_type)) {
5535 /* White list can not be used with RPAs */
5539 white_list_entries++;
5540 add_to_white_list(req, params);
5543 /* After adding all new pending connections, walk through
5544 * the list of pending reports and also add these to the
5545 * white list if there is still space.
5547 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5548 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5549 ¶ms->addr, params->addr_type))
5552 if (white_list_entries >= hdev->le_white_list_size) {
5553 /* Select filter policy to accept all advertising */
5557 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
5558 params->addr_type)) {
5559 /* White list can not be used with RPAs */
5563 white_list_entries++;
5564 add_to_white_list(req, params);
5567 /* Select filter policy to use white list */
/* Build the command sequence for LE passive scanning: pick the own
 * address type, update the white list (which must happen while
 * scanning is off), then queue Set Scan Parameters and Set Scan
 * Enable using the device's configured interval/window.
 * NOTE(review): '¶m_cp' below is mis-encoded '&param_cp'.
 */
5571 void hci_req_add_le_passive_scan(struct hci_request *req)
5573 struct hci_cp_le_set_scan_param param_cp;
5574 struct hci_cp_le_set_scan_enable enable_cp;
5575 struct hci_dev *hdev = req->hdev;
5579 /* Set require_privacy to false since no SCAN_REQ are send
5580 * during passive scanning. Not using an unresolvable address
5581 * here is important so that peer devices using direct
5582 * advertising with our address will be correctly reported
5583 * by the controller.
5585 if (hci_update_random_address(req, false, &own_addr_type))
5588 /* Adding or removing entries from the white list must
5589 * happen before enabling scanning. The controller does
5590 * not allow white list modification while scanning.
5592 filter_policy = update_white_list(req);
5594 memset(¶m_cp, 0, sizeof(param_cp));
5595 param_cp.type = LE_SCAN_PASSIVE;
5596 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5597 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5598 param_cp.own_address_type = own_addr_type;
5599 param_cp.filter_policy = filter_policy;
5600 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5603 memset(&enable_cp, 0, sizeof(enable_cp));
5604 enable_cp.enable = LE_SCAN_ENABLE;
5605 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5606 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Completion callback for the background-scan request: just log a
 * failure status.
 */
5610 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5613 BT_DBG("HCI request failed to update background scanning: "
5614 "status 0x%2.2x", status);
5617 /* This function controls the background scanning based on hdev->pend_le_conns
5618 * list. If there are pending LE connection we start the background scanning,
5619 * otherwise we stop it.
5621 * This function requires the caller holds hdev->lock.
5623 void hci_update_background_scan(struct hci_dev *hdev)
5625 struct hci_request req;
5626 struct hci_conn *conn;
5629 if (!test_bit(HCI_UP, &hdev->flags) ||
5630 test_bit(HCI_INIT, &hdev->flags) ||
5631 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5632 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5633 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5634 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5637 /* No point in doing scanning if LE support hasn't been enabled */
5638 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5641 /* If discovery is active don't interfere with it */
5642 if (hdev->discovery.state != DISCOVERY_STOPPED)
5645 hci_req_init(&req, hdev);
5647 if (list_empty(&hdev->pend_le_conns) &&
5648 list_empty(&hdev->pend_le_reports)) {
5649 /* If there is no pending LE connections or devices
5650 * to be scanned for, we should stop the background
5654 /* If controller is not scanning we are done. */
5655 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5658 hci_req_add_le_scan_disable(&req);
5660 BT_DBG("%s stopping background scanning", hdev->name);
5662 /* If there is at least one pending LE connection, we should
5663 * keep the background scan running.
5666 /* If controller is connecting, we should not start scanning
5667 * since some controllers are not able to scan and connect at
5670 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5674 /* If controller is currently scanning, we stop it to ensure we
5675 * don't miss any advertising (due to duplicates filter).
5677 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5678 hci_req_add_le_scan_disable(&req);
5680 hci_req_add_le_passive_scan(&req);
5682 BT_DBG("%s starting background scanning", hdev->name);
5685 err = hci_req_run(&req, update_background_scan_complete);
5687 BT_ERR("Failed to run HCI request: err %d", err);